Searched refs:cm_id (Results 1 – 25 of 50) sorted by relevance

/linux/drivers/infiniband/core/
iwcm.c
97 struct iwcm_id_private *cm_id; member
154 list_add(&work->free_list, &work->cm_id->work_free_list); in put_work()
178 work->cm_id = cm_id_priv; in alloc_work_entries()
222 static void add_ref(struct iw_cm_id *cm_id) in add_ref() argument
225 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in add_ref()
229 static void rem_ref(struct iw_cm_id *cm_id) in rem_ref() argument
233 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in rem_ref()
238 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
305 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) in iw_cm_disconnect() argument
312 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in iw_cm_disconnect()
[all …]
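
The iwcm.c hits show the kernel's standard embedding pattern: the public struct iw_cm_id lives inside a private iwcm_id_private, and add_ref()/rem_ref() recover the wrapper with container_of(). A minimal sketch of that pattern, with hypothetical struct and field names:

    #include <linux/container_of.h>    /* linux/kernel.h on older trees */
    #include <linux/refcount.h>
    #include <rdma/iw_cm.h>

    /* Private wrapper; the public id is embedded, not pointed to. */
    struct my_id_private {
            struct iw_cm_id id;        /* handle handed to consumers */
            refcount_t      refcount;  /* wrapper lifetime */
    };

    static void my_add_ref(struct iw_cm_id *cm_id)
    {
            /* Recover the wrapper from its embedded member. */
            struct my_id_private *priv =
                    container_of(cm_id, struct my_id_private, id);

            refcount_inc(&priv->refcount);
    }
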
cm_trace.h
139 const struct ib_cm_id *cm_id
142 TP_ARGS(cm_id),
145 __field(const void *, cm_id) /* for eBPF scripts */
153 __entry->cm_id = cm_id;
154 __entry->local_id = be32_to_cpu(cm_id->local_id);
155 __entry->remote_id = be32_to_cpu(cm_id->remote_id);
156 __entry->state = cm_id->state;
157 __entry->lap_state = cm_id->lap_state;
171 const struct ib_cm_id *cm_id \
173 TP_ARGS(cm_id))
[all …]
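
cm_trace.h records the raw cm_id pointer (for correlation from eBPF scripts) next to decoded protocol fields. A stripped-down TRACE_EVENT with the same TP_STRUCT__entry/TP_fast_assign shape; the event name and header guard are hypothetical, and one .c file must define CREATE_TRACE_POINTS before including this:

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM my_cm

    #if !defined(_TRACE_MY_CM_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MY_CM_H

    #include <linux/tracepoint.h>
    #include <rdma/ib_cm.h>

    TRACE_EVENT(my_cm_state,
            TP_PROTO(const struct ib_cm_id *cm_id),
            TP_ARGS(cm_id),
            TP_STRUCT__entry(
                    __field(const void *, cm_id)  /* raw pointer for eBPF */
                    __field(u32, local_id)
            ),
            TP_fast_assign(
                    __entry->cm_id = cm_id;
                    __entry->local_id = be32_to_cpu(cm_id->local_id);
            ),
            TP_printk("cm_id=%p local_id=%u", __entry->cm_id, __entry->local_id)
    );

    #endif /* _TRACE_MY_CM_H */
    #include <trace/define_trace.h>
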
ucma.c
92 struct rdma_cm_id *cm_id; member
168 if (!ctx->cm_id->device) { in ucma_get_ctx_dev()
186 rdma_destroy_id(ctx->cm_id); in ucma_close_id()
189 ctx->cm_id = NULL; in ucma_close_id()
216 struct rdma_cm_id *cm_id) in ucma_set_ctx_cm_id() argument
219 ctx->cm_id = cm_id; in ucma_set_ctx_cm_id()
283 if (ctx->cm_id->qp_type == IB_QPT_UD) in ucma_create_uevent()
284 ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud, in ucma_create_uevent()
295 static int ucma_connect_event_handler(struct rdma_cm_id *cm_id, in ucma_connect_event_handler() argument
298 struct ucma_context *listen_ctx = cm_id->context; in ucma_connect_event_handler()
[all …]
cma.c
142 return id_priv->cm_id.iw; in rdma_iw_cm_id()
1275 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
1278 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
1284 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
1288 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1789 const struct ib_cm_id *cm_id, in cma_find_listener() argument
1803 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1809 if (id_priv_dev->id.device == cm_id->device && in cma_find_listener()
1821 cma_ib_id_from_event(struct ib_cm_id *cm_id, in cma_ib_id_from_event() argument
1883 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); in cma_ib_id_from_event()
[all …]
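
The cma.c hits explain the id_priv->cm_id.ib / cm_id.iw pairs: the RDMA CM keeps a union of the two lower-level CM ids inside its private structure and dispatches on transport type. Roughly, abridged to what the hits imply:

    /* Abridged sketch of the private structure behind rdma_cm_id. */
    struct rdma_id_private {
            struct rdma_cm_id id;        /* public handle */
            union {
                    struct ib_cm_id *ib; /* InfiniBand/RoCE CM */
                    struct iw_cm_id *iw; /* iWARP CM */
            } cm_id;
            /* ... */
    };
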
/linux/net/rds/
rdma_transport.c
49 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, in rds_rdma_cm_event_handler_cmn() argument
54 struct rds_connection *conn = cm_id->context; in rds_rdma_cm_event_handler_cmn()
60 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, in rds_rdma_cm_event_handler_cmn()
63 if (cm_id->device->node_type == RDMA_NODE_IB_CA) in rds_rdma_cm_event_handler_cmn()
85 ret = trans->cm_handle_connect(cm_id, event, isv6); in rds_rdma_cm_event_handler_cmn()
90 rdma_set_service_type(cm_id, conn->c_tos); in rds_rdma_cm_event_handler_cmn()
91 rdma_set_min_rnr_timer(cm_id, IB_RNR_TIMER_000_32); in rds_rdma_cm_event_handler_cmn()
93 ret = rdma_resolve_route(cm_id, in rds_rdma_cm_event_handler_cmn()
106 if (ibic && ibic->i_cm_id == cm_id) { in rds_rdma_cm_event_handler_cmn()
107 cm_id->route.path_rec[0].sl = in rds_rdma_cm_event_handler_cmn()
[all …]
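
rds_rdma_cm_event_handler_cmn() has the canonical shape of an rdma_cm event handler: consumer state hangs off cm_id->context, and the handler switches on event->event. A self-contained sketch with a hypothetical my_conn:

    #include <rdma/rdma_cm.h>

    struct my_conn {                     /* hypothetical consumer state */
            bool established;
    };

    static int my_cm_event_handler(struct rdma_cm_id *cm_id,
                                   struct rdma_cm_event *event)
    {
            struct my_conn *conn = cm_id->context; /* set at rdma_create_id() */

            switch (event->event) {
            case RDMA_CM_EVENT_ADDR_RESOLVED:
                    /* Address resolved; resolve the route next. */
                    return rdma_resolve_route(cm_id, 2000 /* ms */);
            case RDMA_CM_EVENT_ESTABLISHED:
                    conn->established = true;
                    return 0;
            case RDMA_CM_EVENT_DISCONNECTED:
                    conn->established = false;
                    return 0;
            default:
                    return 0;
            }
    }
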
ib.c
410 struct rdma_cm_id *cm_id; in rds_ib_laddr_check() local
422 cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, in rds_ib_laddr_check()
424 if (IS_ERR(cm_id)) in rds_ib_laddr_check()
425 return PTR_ERR(cm_id); in rds_ib_laddr_check()
475 ret = rdma_bind_addr(cm_id, sa); in rds_ib_laddr_check()
478 if (ret || !cm_id->device || in rds_ib_laddr_check()
479 cm_id->device->node_type != RDMA_NODE_IB_CA) in rds_ib_laddr_check()
484 cm_id->device ? cm_id->device->node_type : -1); in rds_ib_laddr_check()
487 rdma_destroy_id(cm_id); in rds_ib_laddr_check()
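
rds_ib_laddr_check() is a probe idiom worth noting: create a throwaway cm_id, bind it to the address under test, inspect which device the bind landed on, and destroy the id. Sketched with hypothetical names and abridged error handling:

    #include <net/net_namespace.h>
    #include <rdma/rdma_cm.h>

    /* Never called: the id is bound and destroyed without connecting. */
    static int my_null_handler(struct rdma_cm_id *cm_id,
                               struct rdma_cm_event *event)
    {
            return 0;
    }

    static int my_addr_is_ib(struct sockaddr *sa)
    {
            struct rdma_cm_id *cm_id;
            int ret;

            cm_id = rdma_create_id(&init_net, my_null_handler, NULL,
                                   RDMA_PS_TCP, IB_QPT_RC);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            ret = rdma_bind_addr(cm_id, sa);
            if (!ret && (!cm_id->device ||
                         cm_id->device->node_type != RDMA_NODE_IB_CA))
                    ret = -EADDRNOTAVAIL;   /* bound, but not to an IB CA */

            rdma_destroy_id(cm_id);
            return ret;
    }
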
rdma_transport.h
20 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
22 int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
/linux/drivers/infiniband/hw/qedr/
qedr_iw_cm.c
97 if (ep->cm_id) in qedr_iw_free_ep()
98 ep->cm_id->rem_ref(ep->cm_id); in qedr_iw_free_ep()
135 listener->cm_id->event_handler(listener->cm_id, &event); in qedr_iw_mpa_request()
166 if (ep->cm_id) in qedr_iw_issue_event()
167 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_issue_event()
175 if (ep->cm_id) in qedr_iw_close_event()
239 if (ep->cm_id) in qedr_iw_disconnect_worker()
240 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_disconnect_worker()
532 int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) in qedr_iw_connect() argument
534 struct qedr_dev *dev = get_qedr_dev(cm_id->device); in qedr_iw_connect()
[all …]
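
qedr's ep->cm_id->event_handler(ep->cm_id, &event) calls are the provider-to-core upcall path of iw_cm: the driver fills a struct iw_cm_event and invokes the handler the iwcm core installed on the id. One such upcall in isolation, with a hypothetical endpoint struct:

    #include <rdma/iw_cm.h>

    struct my_ep {                       /* hypothetical endpoint */
            struct iw_cm_id *cm_id;      /* may be NULL after teardown */
    };

    static void my_issue_close(struct my_ep *ep, int status)
    {
            struct iw_cm_event event = {};

            event.event  = IW_CM_EVENT_CLOSE;
            event.status = status;

            /* Upcall into the iwcm core, which dispatches to the consumer. */
            if (ep->cm_id)
                    ep->cm_id->event_handler(ep->cm_id, &event);
    }
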
qedr_iw_cm.h
34 int qedr_iw_connect(struct iw_cm_id *cm_id,
37 int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
39 int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
41 int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
43 int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
/linux/include/rdma/
iw_cm.h
42 typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
53 typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
115 void iw_destroy_cm_id(struct iw_cm_id *cm_id);
128 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
144 int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
158 int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data,
173 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
185 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
196 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
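
Together these iw_cm.h prototypes form the consumer API (in-tree, the RDMA CM core is the main consumer): create an id, listen, then accept or reject from the handler. A minimal passive-side sketch; a real consumer would create a QP and call iw_cm_accept() with its qpn instead of rejecting:

    #include <rdma/iw_cm.h>

    static int my_iw_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
    {
            /* Placeholder policy: refuse every incoming connect request. */
            if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
                    return iw_cm_reject(cm_id, NULL, 0);
            return 0;
    }

    static int my_listen(struct ib_device *dev)
    {
            struct iw_cm_id *cm_id = iw_create_cm_id(dev, my_iw_handler, NULL);
            int ret;

            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            ret = iw_cm_listen(cm_id, 8 /* backlog */);
            if (ret)
                    iw_destroy_cm_id(cm_id);
            return ret;
    }
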
ib_cm.h
289 typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
325 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
343 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id);
380 int ib_send_cm_req(struct ib_cm_id *cm_id,
405 int ib_send_cm_rep(struct ib_cm_id *cm_id,
416 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
429 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
444 int ib_send_cm_drep(struct ib_cm_id *cm_id,
462 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
476 int ib_send_cm_rej(struct ib_cm_id *cm_id,
[all …]
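
ib_cm.h is the lower-level InfiniBand CM MAD interface; the REQ/REP/RTU and DREQ/DREP send functions above track the CM protocol states directly. A minimal passive-side setup, with an arbitrary service id:

    #include <rdma/ib_cm.h>

    static int my_ib_cm_handler(struct ib_cm_id *cm_id,
                                const struct ib_cm_event *event)
    {
            /* Dispatch on event->event: IB_CM_REQ_RECEIVED, etc. */
            return 0;
    }

    static int my_cm_listen(struct ib_device *device)
    {
            struct ib_cm_id *cm_id;
            int ret;

            cm_id = ib_create_cm_id(device, my_ib_cm_handler, NULL);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            /* Service IDs are big-endian on the wire. */
            ret = ib_cm_listen(cm_id, cpu_to_be64(0x1234ULL));
            if (ret)
                    ib_destroy_cm_id(cm_id);
            return ret;
    }
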
/linux/drivers/nvme/target/
rdma.c
90 struct rdma_cm_id *cm_id; member
123 struct rdma_cm_id *cm_id; member
629 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init() local
634 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_init()
635 cm_id->port_num, req->sg, req->sg_cnt, in nvmet_rdma_rw_ctx_init()
639 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_init()
648 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy() local
652 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_destroy()
653 cm_id->port_num, req->sg, req->sg_cnt, in nvmet_rdma_rw_ctx_destroy()
657 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_destroy()
[all …]
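
The nvmet hits show what a live queue pulls off its cm_id: the QP and port number that the rdma_rw API needs. The init/destroy pairing in isolation; the wrapper names are hypothetical and the signature-enabled variant from the hits is omitted:

    #include <linux/scatterlist.h>
    #include <rdma/rdma_cm.h>
    #include <rdma/rw.h>

    /* Map sg for an RDMA READ/WRITE to (remote_addr, rkey) on this queue. */
    static int my_rw_init(struct rdma_cm_id *cm_id, struct rdma_rw_ctx *rw,
                          struct scatterlist *sg, u32 sg_cnt,
                          u64 remote_addr, u32 rkey,
                          enum dma_data_direction dir)
    {
            return rdma_rw_ctx_init(rw, cm_id->qp, cm_id->port_num,
                                    sg, sg_cnt, 0 /* sg_offset */,
                                    remote_addr, rkey, dir);
    }

    static void my_rw_destroy(struct rdma_cm_id *cm_id, struct rdma_rw_ctx *rw,
                              struct scatterlist *sg, u32 sg_cnt,
                              enum dma_data_direction dir)
    {
            rdma_rw_ctx_destroy(rw, cm_id->qp, cm_id->port_num,
                                sg, sg_cnt, dir);
    }
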
/linux/net/9p/
trans_rdma.c
76 struct rdma_cm_id *cm_id; member
280 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
301 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in recv_done()
348 ib_dma_unmap_single(rdma->cm_id->device, in send_done()
376 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
377 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
390 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
393 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
409 ib_dma_unmap_single(rdma->cm_id->device, c->busa, in post_recv()
485 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
[all …]
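
The 9p transport reaches the ib_device through rdma->cm_id->device for every DMA mapping. The map/check pattern from the hits, reduced to one helper with hypothetical names:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int my_map_recv_buf(struct rdma_cm_id *cm_id, void *buf,
                               size_t size, u64 *dma_addr)
    {
            struct ib_device *dev = cm_id->device; /* valid once bound/resolved */

            *dma_addr = ib_dma_map_single(dev, buf, size, DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(dev, *dma_addr))
                    return -EIO;
            return 0;
    }
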
/linux/drivers/infiniband/hw/irdma/
cm.c
137 struct iw_cm_id *cm_id, in irdma_get_cmevent_info() argument
140 memcpy(&event->local_addr, &cm_id->m_local_addr, in irdma_get_cmevent_info()
142 memcpy(&event->remote_addr, &cm_id->m_remote_addr, in irdma_get_cmevent_info()
160 struct iw_cm_id *cm_id, in irdma_send_cm_event() argument
167 trace_irdma_send_cm_event(cm_node, cm_id, type, status, in irdma_send_cm_event()
172 cm_node, cm_id, cm_node->accelerated, cm_node->state, type, in irdma_send_cm_event()
187 irdma_get_cmevent_info(cm_node, cm_id, &event); in irdma_send_cm_event()
203 return cm_id->event_handler(cm_id, &event); in irdma_send_cm_event()
234 if (!cm_node->cm_id) in irdma_create_event()
250 event->cm_info.cm_id = cm_node->cm_id; in irdma_create_event()
[all …]
cm.h
245 struct iw_cm_id *cm_id; member
296 struct iw_cm_id *cm_id; member
330 struct iw_cm_id *cm_id; member
394 int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
395 int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
396 int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
397 int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
398 int irdma_destroy_listen(struct iw_cm_id *cm_id);
trace_cm.h
201 TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
203 TP_ARGS(cm_node, cm_id, type, status, caller),
206 __field(struct iw_cm_id *, cm_id)
222 __entry->cm_id = cm_id;
241 __entry->cm_id,
257 TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
259 TP_ARGS(cm_id, type, status, caller),
260 TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
265 TP_fast_assign(__entry->cm_id = cm_id;
271 __entry->cm_id,
/linux/fs/smb/server/
transport_rdma.c
84 struct rdma_cm_id *cm_id; member
103 struct rdma_cm_id *cm_id; member
269 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_recvmsg()
295 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_empty_recvmsg()
338 rdma_disconnect(t->cm_id); in smb_direct_disconnect_rdma_work()
360 static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) in alloc_transport() argument
369 t->cm_id = cm_id; in alloc_transport()
370 cm_id->context = t; in alloc_transport()
452 if (t->cm_id) in free_transport()
453 rdma_destroy_id(t->cm_id); in free_transport()
[all …]
/linux/drivers/infiniband/hw/erdma/
erdma_cm.c
238 struct iw_cm_id *cm_id; in erdma_cm_upcall() local
246 cm_id = cep->listen_cep->cm_id; in erdma_cm_upcall()
251 cm_id = cep->cm_id; in erdma_cm_upcall()
267 return cm_id->event_handler(cm_id, &event); in erdma_cm_upcall()
283 if (cep->cm_id) { in erdma_qp_cm_drop()
301 cep->cm_id->rem_ref(cep->cm_id); in erdma_qp_cm_drop()
302 cep->cm_id = NULL; in erdma_qp_cm_drop()
800 if (cep->cm_id) in erdma_cm_work_handler()
805 if (cep->cm_id) { in erdma_cm_work_handler()
838 if (cep->cm_id) in erdma_cm_work_handler()
[all …]
/linux/drivers/infiniband/hw/cxgb4/
cm.c
152 epc->cm_id->rem_ref(epc->cm_id); in deref_cm_id()
153 epc->cm_id = NULL; in deref_cm_id()
160 epc->cm_id->add_ref(epc->cm_id); in ref_cm_id()
1277 if (ep->com.cm_id) { in close_complete_upcall()
1279 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
1280 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1293 if (ep->com.cm_id) { in peer_close_upcall()
1295 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
1296 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1309 if (ep->com.cm_id) { in peer_abort_upcall()
[all …]
/linux/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
80 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
274 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument
282 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
293 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
314 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
347 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_nonsrq_init_rx() argument
375 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); in ipoib_cm_nonsrq_init_rx()
420 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_send_rep() argument
439 return ib_send_cm_rep(cm_id, &rep); in ipoib_cm_send_rep()
442 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, in ipoib_cm_req_handler() argument
[all …]
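
ipoib_cm_modify_rx_qp() calls ib_cm_init_qp_attr() once per QP state: the CM fills in the attribute mask appropriate for the current connection state and the caller applies it with ib_modify_qp(). One transition in isolation:

    #include <rdma/ib_cm.h>
    #include <rdma/ib_verbs.h>

    static int my_qp_to_init(struct ib_cm_id *cm_id, struct ib_qp *qp)
    {
            struct ib_qp_attr qp_attr;
            int qp_attr_mask, ret;

            qp_attr.qp_state = IB_QPS_INIT;
            /* CM fills port, pkey index and access flags for this state. */
            ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
            if (ret)
                    return ret;
            return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
    }
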
/linux/drivers/infiniband/ulp/rtrs/
rtrs-srv.c
791 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
1558 rdma_disconnect(con->c.cm_id); in rtrs_srv_close_work()
1584 rdma_destroy_id(con->c.cm_id); in rtrs_srv_close_work()
1600 struct rdma_cm_id *cm_id) in rtrs_rdma_do_accept() argument
1624 err = rdma_accept(cm_id, &param); in rtrs_rdma_do_accept()
1631 static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno) in rtrs_rdma_do_reject() argument
1642 err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED); in rtrs_rdma_do_reject()
1664 struct rdma_cm_id *cm_id, in create_con() argument
1682 con->c.cm_id = cm_id; in create_con()
1735 cm_id->context = &con->c; in create_con()
[all …]
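
rtrs shows both outcomes of an incoming connect on the server side: rdma_accept() with a filled rdma_conn_param, or rdma_reject() carrying consumer-defined private data. In isolation, with a hypothetical reject message format:

    #include <rdma/ib_cm.h>     /* IB_CM_REJ_CONSUMER_DEFINED */
    #include <rdma/rdma_cm.h>

    struct my_rej_msg {                  /* hypothetical wire format */
            __le16 errno_code;
    };

    static int my_accept(struct rdma_cm_id *cm_id, const void *pdata, u8 len)
    {
            struct rdma_conn_param param = {
                    .rnr_retry_count  = 7,  /* 7 == retry indefinitely */
                    .private_data     = pdata,
                    .private_data_len = len,
            };

            return rdma_accept(cm_id, &param);
    }

    static int my_reject(struct rdma_cm_id *cm_id, int errno_code)
    {
            struct my_rej_msg msg = {
                    .errno_code = cpu_to_le16(errno_code),
            };

            return rdma_reject(cm_id, &msg, sizeof(msg),
                               IB_CM_REJ_CONSUMER_DEFINED);
    }
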
rtrs.c
218 rdma_notify(con->cm_id, IB_EVENT_COMM_EST); in qp_event_handler()
235 struct rdma_cm_id *cm_id = con->cm_id; in create_cq() local
239 cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector, in create_cq()
242 cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); in create_cq()
259 struct rdma_cm_id *cm_id = con->cm_id; in create_qp() local
274 ret = rdma_create_qp(cm_id, pd, &init_attr); in create_qp()
279 con->qp = cm_id->qp; in create_qp()
321 rdma_destroy_qp(con->cm_id); in rtrs_cq_qp_destroy()
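
rtrs.c ties the verbs objects to the cm_id: the CQ is allocated against cm_id->device, the QP is created with rdma_create_qp(), and the resulting QP is reachable as cm_id->qp. Condensed, with assumed queue sizing:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int my_create_cq_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
                               void *ctx, int nr_cqe, struct ib_cq **cq_out)
    {
            struct ib_qp_init_attr init_attr = {};
            struct ib_cq *cq;
            int ret;

            cq = ib_alloc_cq(cm_id->device, ctx, nr_cqe,
                             0 /* comp vector */, IB_POLL_SOFTIRQ);
            if (IS_ERR(cq))
                    return PTR_ERR(cq);

            init_attr.qp_type          = IB_QPT_RC;
            init_attr.send_cq          = cq;
            init_attr.recv_cq          = cq;
            init_attr.cap.max_send_wr  = 64;  /* assumed sizing */
            init_attr.cap.max_recv_wr  = 64;
            init_attr.cap.max_send_sge = 1;
            init_attr.cap.max_recv_sge = 1;

            ret = rdma_create_qp(cm_id, pd, &init_attr);
            if (ret) {
                    ib_free_cq(cq);
                    return ret;
            }
            *cq_out = cq;   /* the QP itself lives at cm_id->qp */
            return 0;
    }
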
/linux/drivers/nvme/host/
rdma.c
95 struct rdma_cm_id *cm_id; member
151 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
279 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
281 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
367 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) in nvme_rdma_find_get_device() argument
373 if (ndev->dev->node_guid == cm_id->device->node_guid && in nvme_rdma_find_get_device()
382 ndev->dev = cm_id->device; in nvme_rdma_find_get_device()
496 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
498 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
561 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
[all …]
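
nvme-rdma caches per-HCA state keyed on cm_id->device->node_guid, so queues on the same device share one structure. The lookup shape, with hypothetical list and struct names:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <rdma/rdma_cm.h>

    struct my_device {                   /* hypothetical per-HCA state */
            struct list_head entry;
            struct ib_device *dev;
            struct kref      ref;
    };

    static LIST_HEAD(my_device_list);
    static DEFINE_MUTEX(my_device_mutex);

    static struct my_device *my_find_get_device(struct rdma_cm_id *cm_id)
    {
            struct my_device *ndev;

            mutex_lock(&my_device_mutex);
            list_for_each_entry(ndev, &my_device_list, entry) {
                    /* node_guid uniquely identifies the HCA. */
                    if (ndev->dev->node_guid == cm_id->device->node_guid &&
                        kref_get_unless_zero(&ndev->ref))
                            goto out;
            }
            ndev = NULL;
    out:
            mutex_unlock(&my_device_mutex);
            return ndev;
    }
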
/linux/drivers/infiniband/ulp/srpt/
ib_srpt.c
289 rdma_notify(ch->rdma_cm.cm_id, event->event); in srpt_qp_event()
291 ib_cm_notify(ch->ib_cm.cm_id, event->event); in srpt_qp_event()
1225 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rtr()
1255 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rts()
1898 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init); in srpt_create_ch_ib()
1899 ch->qp = ch->rdma_cm.cm_id->qp; in srpt_create_ch_ib()
2010 ret = rdma_disconnect(ch->rdma_cm.cm_id); in srpt_disconnect_ch()
2012 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0); in srpt_disconnect_ch()
2014 ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0); in srpt_disconnect_ch()
2163 rdma_destroy_id(ch->rdma_cm.cm_id); in srpt_release_channel_work()
[all …]
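
srpt supports both CM stacks at once: the channel holds either an rdma_cm id or a raw ib_cm id and branches on which is in use, as the disconnect hits show (rdma_disconnect() vs. ib_send_cm_dreq()). The shape of that split, abridged with an assumed using_rdma_cm flag:

    #include <rdma/ib_cm.h>
    #include <rdma/rdma_cm.h>

    struct my_ch {                       /* abridged channel state */
            bool using_rdma_cm;
            union {
                    struct { struct ib_cm_id   *cm_id; } ib_cm;
                    struct { struct rdma_cm_id *cm_id; } rdma_cm;
            };
    };

    static int my_disconnect_ch(struct my_ch *ch)
    {
            if (ch->using_rdma_cm)
                    return rdma_disconnect(ch->rdma_cm.cm_id);
            /* Raw IB CM: send the DREQ ourselves. */
            return ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
    }
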
/linux/drivers/infiniband/ulp/isert/
ib_isert.c
92 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); in isert_qp_event_callback()
437 isert_conn->cm_id = cma_id; in isert_connect_request()
494 if (isert_conn->cm_id && in isert_connect_release()
496 rdma_destroy_id(isert_conn->cm_id); in isert_connect_release()
554 struct isert_np *isert_np = isert_conn->cm_id->context; in isert_handle_unbound_conn()
592 err = rdma_disconnect(isert_conn->cm_id); in isert_conn_terminate()
607 isert_np->cm_id = NULL; in isert_np_cma_handler()
610 isert_np->cm_id = isert_setup_id(isert_np); in isert_np_cma_handler()
611 if (IS_ERR(isert_np->cm_id)) { in isert_np_cma_handler()
613 isert_np, PTR_ERR(isert_np->cm_id)); in isert_np_cma_handler()
[all …]
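
isert's QP event callback forwards IB_EVENT_COMM_EST to the CM with rdma_notify(), which nudges connections where data arrived before the CM processed the RTU. In isolation:

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static void my_qp_event_handler(struct ib_event *event, void *context)
    {
            struct rdma_cm_id *cm_id = context; /* assumed stashed at QP setup */

            /* Data beat the RTU: tell the CM the connection is established. */
            if (event->event == IB_EVENT_COMM_EST)
                    rdma_notify(cm_id, IB_EVENT_COMM_EST);
    }
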
