1 /*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id$
34 */
35 #define _GNU_SOURCE
36 #include <config.h>
37
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdio.h>
41 #include <fcntl.h>
42 #include <errno.h>
43 #include <unistd.h>
44 #include <pthread.h>
45 #include <stddef.h>
46
47 #include <infiniband/cm.h>
48 #include <rdma/ib_user_cm.h>
49 #include <infiniband/driver.h>
50 #include <infiniband/marshall.h>
51
52 #define PFX "libibcm: "
53
54 #define IB_USER_CM_MIN_ABI_VERSION 4
55 #define IB_USER_CM_MAX_ABI_VERSION 5
56
57 static int abi_ver;
58 static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
59
/* Upper bound on the number of /dev/ucm<N> character devices probed when
 * mapping a verbs device name to its ucm device index. */
enum {
	IB_UCM_MAX_DEVICES = 32
};
63
/*
 * Record the given error code in errno and return -1 — the failure
 * convention used by every public entry point in this library.
 */
static inline int ERR(int error)
{
	errno = error;
	return -1;
}
69
70
/*
 * Build a kernel ucm request on the caller's stack: an ib_ucm_cmd_hdr
 * immediately followed by the command payload, plus a separate response
 * buffer whose address is stored in cmd->response for the kernel to fill
 * in.  On alloca failure this "returns" -1/ENOMEM from the ENCLOSING
 * function (alloca cannot fail on common platforms; the check is kept
 * defensively).  msg, cmd, resp and size are output lvalues.
 */
#define CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, type, size) \
do {                                        \
	struct ib_ucm_cmd_hdr *hdr;         \
                                            \
	size = sizeof(*hdr) + sizeof(*cmd); \
	msg = alloca(size);                 \
	if (!msg)                           \
		return ERR(ENOMEM);         \
	hdr = msg;                          \
	cmd = msg + sizeof(*hdr);           \
	hdr->cmd = type;                    \
	hdr->in  = sizeof(*cmd);            \
	hdr->out = sizeof(*resp);           \
	memset(cmd, 0, sizeof(*cmd));       \
	resp = alloca(sizeof(*resp));       \
	if (!resp)                          \
		return ERR(ENOMEM);         \
	cmd->response = (uintptr_t)resp;\
} while (0)
90
/*
 * Same as CM_CREATE_MSG_CMD_RESP but for commands that carry no response
 * payload (hdr->out = 0).  msg, cmd and size are output lvalues; on
 * alloca failure this "returns" -1/ENOMEM from the enclosing function.
 */
#define CM_CREATE_MSG_CMD(msg, cmd, type, size) \
do {                                        \
	struct ib_ucm_cmd_hdr *hdr;         \
                                            \
	size = sizeof(*hdr) + sizeof(*cmd); \
	msg = alloca(size);                 \
	if (!msg)                           \
		return ERR(ENOMEM);         \
	hdr = msg;                          \
	cmd = msg + sizeof(*hdr);           \
	hdr->cmd = type;                    \
	hdr->in  = sizeof(*cmd);            \
	hdr->out = 0;                       \
	memset(cmd, 0, sizeof(*cmd));       \
} while (0)
106
/*
 * Library-private wrapper around the public ib_cm_id.  Tracks how many
 * events for this id have been acknowledged so ib_cm_destroy_id() can
 * block until every kernel-reported event has been consumed.
 */
struct cm_id_private {
	struct ib_cm_id id;	/* public handle returned to callers */
	int events_completed;	/* count of events acked via ib_cm_ack_event() */
	pthread_cond_t cond;	/* signaled each time an event completes */
	pthread_mutex_t mut;	/* protects events_completed */
};
113
/*
 * Read the kernel ucm module's ABI version from sysfs and cache it in
 * abi_ver.  Returns -1 if the version lies outside the supported range;
 * returns 0 otherwise — including when the sysfs file cannot be read,
 * in which case a warning is printed and abi_ver stays 0 (so the check
 * is retried on the next ucm_init() call).
 */
static int check_abi_version(void)
{
	char value[8];

	if (ibv_read_sysfs_file(ibv_get_sysfs_path(),
				"class/infiniband_cm/abi_version",
				value, sizeof value) < 0) {
		fprintf(stderr, PFX "couldn't read ABI version\n");
		return 0;
	}

	abi_ver = strtol(value, NULL, 10);
	if (abi_ver < IB_USER_CM_MIN_ABI_VERSION ||
	    abi_ver > IB_USER_CM_MAX_ABI_VERSION) {
		fprintf(stderr, PFX "kernel ABI version %d "
			"doesn't match library version %d.\n",
			abi_ver, IB_USER_CM_MAX_ABI_VERSION);
		return -1;
	}
	return 0;
}
135
ucm_init(void)136 static int ucm_init(void)
137 {
138 int ret = 0;
139
140 pthread_mutex_lock(&mut);
141 if (!abi_ver)
142 ret = check_abi_version();
143 pthread_mutex_unlock(&mut);
144
145 return ret;
146 }
147
ucm_get_dev_index(char * dev_name)148 static int ucm_get_dev_index(char *dev_name)
149 {
150 char *dev_path;
151 char ibdev[IBV_SYSFS_NAME_MAX];
152 int i, ret;
153
154 for (i = 0; i < IB_UCM_MAX_DEVICES; i++) {
155 ret = asprintf(&dev_path, "/sys/class/infiniband_cm/ucm%d", i);
156 if (ret < 0)
157 return -1;
158
159 ret = ibv_read_sysfs_file(dev_path, "ibdev", ibdev, sizeof ibdev);
160 if (ret < 0)
161 continue;
162
163 if (!strcmp(dev_name, ibdev)) {
164 free(dev_path);
165 return i;
166 }
167
168 free(dev_path);
169 }
170 return -1;
171 }
172
/*
 * Open the ucm character device corresponding to the given verbs
 * context.  Validates the kernel ABI first, then resolves the device
 * index via sysfs and opens /dev/ucm<index> read-write.
 *
 * Returns a newly allocated ib_cm_device (caller frees with
 * ib_cm_close_device()) or NULL on any failure.
 */
struct ib_cm_device* ib_cm_open_device(struct ibv_context *device_context)
{
	struct ib_cm_device *dev;
	char *dev_path;
	int index, ret;

	if (ucm_init())
		return NULL;

	index = ucm_get_dev_index(device_context->device->name);
	if (index < 0)
		return NULL;

	dev = malloc(sizeof *dev);
	if (!dev)
		return NULL;

	dev->device_context = device_context;

	ret = asprintf(&dev_path, "/dev/ucm%d", index);
	if (ret < 0)
		goto err1;

	dev->fd = open(dev_path, O_RDWR);
	if (dev->fd < 0)
		goto err2;

	free(dev_path);
	return dev;

	/* goto-based cleanup: release in reverse order of acquisition */
err2:
	free(dev_path);
err1:
	free(dev);
	return NULL;
}
209
/* Close the ucm file descriptor and release the device structure. */
void ib_cm_close_device(struct ib_cm_device *device)
{
	close(device->fd);
	free(device);
}
215
/* Destroy the synchronization objects of a cm_id_private and free it.
 * Both cond and mut must have been successfully initialized. */
static void ib_cm_free_id(struct cm_id_private *cm_id_priv)
{
	pthread_cond_destroy(&cm_id_priv->cond);
	pthread_mutex_destroy(&cm_id_priv->mut);
	free(cm_id_priv);
}
222
ib_cm_alloc_id(struct ib_cm_device * device,void * context)223 static struct cm_id_private *ib_cm_alloc_id(struct ib_cm_device *device,
224 void *context)
225 {
226 struct cm_id_private *cm_id_priv;
227
228 cm_id_priv = malloc(sizeof *cm_id_priv);
229 if (!cm_id_priv)
230 return NULL;
231
232 memset(cm_id_priv, 0, sizeof *cm_id_priv);
233 cm_id_priv->id.device = device;
234 cm_id_priv->id.context = context;
235 if (pthread_mutex_init(&cm_id_priv->mut, NULL))
236 goto err;
237 if (pthread_cond_init(&cm_id_priv->cond, NULL))
238 goto err;
239
240 return cm_id_priv;
241
242 err: ib_cm_free_id(cm_id_priv);
243 return NULL;
244 }
245
ib_cm_create_id(struct ib_cm_device * device,struct ib_cm_id ** cm_id,void * context)246 int ib_cm_create_id(struct ib_cm_device *device,
247 struct ib_cm_id **cm_id, void *context)
248 {
249 struct ib_ucm_create_id_resp *resp;
250 struct ib_ucm_create_id *cmd;
251 struct cm_id_private *cm_id_priv;
252 void *msg;
253 int result;
254 int size;
255
256 cm_id_priv = ib_cm_alloc_id(device, context);
257 if (!cm_id_priv)
258 return ERR(ENOMEM);
259
260 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_CREATE_ID, size);
261 cmd->uid = (uintptr_t) cm_id_priv;
262
263 result = write(device->fd, msg, size);
264 if (result != size)
265 goto err;
266
267 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
268
269 cm_id_priv->id.handle = resp->id;
270 *cm_id = &cm_id_priv->id;
271 return 0;
272
273 err: ib_cm_free_id(cm_id_priv);
274 return result;
275 }
276
/*
 * Destroy a CM id.  After the kernel acknowledges destruction it reports
 * how many events it delivered for this id; block until the application
 * has acked (ib_cm_ack_event) that many events, then free the private
 * structure.  Returns 0 on success, -1 with errno set on failure.
 */
int ib_cm_destroy_id(struct ib_cm_id *cm_id)
{
	struct ib_ucm_destroy_id_resp *resp;
	struct ib_ucm_destroy_id *cmd;
	struct cm_id_private *cm_id_priv;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_DESTROY_ID, size);
	cmd->id = cm_id->handle;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* wait until every event the kernel reported has been acked */
	pthread_mutex_lock(&cm_id_priv->mut);
	while (cm_id_priv->events_completed < resp->events_reported)
		pthread_cond_wait(&cm_id_priv->cond, &cm_id_priv->mut);
	pthread_mutex_unlock(&cm_id_priv->mut);

	ib_cm_free_id(cm_id_priv);
	return 0;
}
305
/*
 * Query the kernel for the id's service/communication attributes and
 * copy them into *param.  Returns 0 on success, -1 with errno set on
 * failure (EINVAL if param is NULL).
 */
int ib_cm_attr_id(struct ib_cm_id *cm_id, struct ib_cm_attr_param *param)
{
	struct ib_ucm_attr_id_resp *resp;
	struct ib_ucm_attr_id *cmd;
	void *msg;
	int result;
	int size;

	if (!param)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_ATTR_ID, size);
	cmd->id = cm_id->handle;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	param->service_id   = resp->service_id;
	param->service_mask = resp->service_mask;
	param->local_id     = resp->local_id;
	param->remote_id    = resp->remote_id;
	return 0;
}
332
/*
 * Ask the kernel which QP attributes (and mask) are needed to move the
 * QP to qp_attr->qp_state for this connection, and unmarshal them into
 * *qp_attr / *qp_attr_mask.  Returns 0 on success, -1 with errno set on
 * failure (EINVAL for NULL output pointers).
 */
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ibv_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct ibv_kern_qp_attr *resp;
	struct ib_ucm_init_qp_attr *cmd;
	void *msg;
	int result;
	int size;

	if (!qp_attr || !qp_attr_mask)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_INIT_QP_ATTR, size);
	cmd->id = cm_id->handle;
	cmd->qp_state = qp_attr->qp_state;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : result;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	*qp_attr_mask = resp->qp_attr_mask;
	ibv_copy_qp_attr_from_kern(qp_attr, resp);

	return 0;
}
361
/*
 * Ask the kernel to listen on cm_id for connection requests whose
 * service ID matches service_id under service_mask.  Returns 0 on
 * success, -1 with errno set on failure.
 */
int ib_cm_listen(struct ib_cm_id *cm_id,
		 __be64 service_id,
		 __be64 service_mask)
{
	struct ib_ucm_listen *cmd;
	void *msg;
	int ret;
	int msg_size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_LISTEN, msg_size);
	cmd->id = cm_id->handle;
	cmd->service_id = service_id;
	cmd->service_mask = service_mask;

	ret = write(cm_id->device->fd, msg, msg_size);
	if (ret == msg_size)
		return 0;

	return (ret >= 0) ? ERR(ENODATA) : -1;
}
382
/*
 * Send a connection request (REQ) on cm_id.  param->primary_path is
 * required; alternate_path and private_data are optional.  The path
 * records and private data are marshalled by pointer into the command,
 * so they only need to remain valid for the duration of the write.
 * Returns 0 on success, -1 with errno set on failure.
 */
int ib_cm_send_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param)
{
	struct ib_user_path_rec p_path;
	struct ib_user_path_rec *a_path;
	struct ib_ucm_req *cmd;
	void *msg;
	int result;
	int size;

	if (!param || !param->primary_path)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REQ, size);
	cmd->id = cm_id->handle;
	cmd->qpn = param->qp_num;
	cmd->qp_type = param->qp_type;
	cmd->psn = param->starting_psn;
	cmd->sid = param->service_id;
	cmd->peer_to_peer = param->peer_to_peer;
	cmd->responder_resources = param->responder_resources;
	cmd->initiator_depth = param->initiator_depth;
	cmd->remote_cm_response_timeout = param->remote_cm_response_timeout;
	cmd->flow_control = param->flow_control;
	cmd->local_cm_response_timeout = param->local_cm_response_timeout;
	cmd->retry_count = param->retry_count;
	cmd->rnr_retry_count = param->rnr_retry_count;
	cmd->max_cm_retries = param->max_cm_retries;
	cmd->srq = param->srq;

	/* marshal the primary path into kernel layout; passed by address */
	ibv_copy_path_rec_to_kern(&p_path, param->primary_path);
	cmd->primary_path = (uintptr_t) &p_path;

	if (param->alternate_path) {
		/* alloca storage lives until this function returns, which
		 * outlasts the write() below */
		a_path = alloca(sizeof(*a_path));
		if (!a_path)
			return ERR(ENOMEM);

		ibv_copy_path_rec_to_kern(a_path, param->alternate_path);
		cmd->alternate_path = (uintptr_t) a_path;
	}

	if (param->private_data && param->private_data_len) {
		cmd->data = (uintptr_t) param->private_data;
		cmd->len = param->private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	return 0;
}
435
/*
 * Send a connection reply (REP) accepting a received REQ.  The uid field
 * carries our cm_id_private pointer so the kernel can tag subsequent
 * events for this id.  Returns 0 on success, -1 with errno set.
 */
int ib_cm_send_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param)
{
	struct ib_ucm_rep *cmd;
	void *msg;
	int result;
	int size;

	if (!param)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REP, size);
	/* echoed back in events so ib_cm_get_event() can recover our pointer */
	cmd->uid = (uintptr_t) container_of(cm_id, struct cm_id_private, id);
	cmd->id = cm_id->handle;
	cmd->qpn = param->qp_num;
	cmd->psn = param->starting_psn;
	cmd->responder_resources = param->responder_resources;
	cmd->initiator_depth = param->initiator_depth;
	cmd->target_ack_delay = param->target_ack_delay;
	cmd->failover_accepted = param->failover_accepted;
	cmd->flow_control = param->flow_control;
	cmd->rnr_retry_count = param->rnr_retry_count;
	cmd->srq = param->srq;

	if (param->private_data && param->private_data_len) {
		cmd->data = (uintptr_t) param->private_data;
		cmd->len = param->private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	return 0;
}
470
cm_send_private_data(struct ib_cm_id * cm_id,uint32_t type,void * private_data,uint8_t private_data_len)471 static inline int cm_send_private_data(struct ib_cm_id *cm_id,
472 uint32_t type,
473 void *private_data,
474 uint8_t private_data_len)
475 {
476 struct ib_ucm_private_data *cmd;
477 void *msg;
478 int result;
479 int size;
480
481 CM_CREATE_MSG_CMD(msg, cmd, type, size);
482 cmd->id = cm_id->handle;
483
484 if (private_data && private_data_len) {
485 cmd->data = (uintptr_t) private_data;
486 cmd->len = private_data_len;
487 }
488
489 result = write(cm_id->device->fd, msg, size);
490 if (result != size)
491 return (result >= 0) ? ERR(ENODATA) : -1;
492
493 return 0;
494 }
495
/* Send a ready-to-use (RTU) message completing connection establishment. */
int ib_cm_send_rtu(struct ib_cm_id *cm_id,
		   void *private_data,
		   uint8_t private_data_len)
{
	return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_RTU,
				    private_data, private_data_len);
}
503
/* Send a disconnect request (DREQ) to begin tearing down the connection. */
int ib_cm_send_dreq(struct ib_cm_id *cm_id,
		    void *private_data,
		    uint8_t private_data_len)
{
	return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREQ,
				    private_data, private_data_len);
}
511
/* Send a disconnect reply (DREP) acknowledging a received DREQ. */
int ib_cm_send_drep(struct ib_cm_id *cm_id,
		    void *private_data,
		    uint8_t private_data_len)
{
	return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREP,
				    private_data, private_data_len);
}
519
/*
 * ABI-4 compatibility path for ib_cm_notify(IBV_EVENT_COMM_EST): the
 * old ESTABLISH command took only the id, with no event field.
 */
static int cm_establish(struct ib_cm_id *cm_id)
{
	/* In kernel ABI 4 ESTABLISH was repurposed as NOTIFY and gained an
	   extra field. For some reason the compat definitions were deleted
	   from the uapi headers :( */
#define IB_USER_CM_CMD_ESTABLISH IB_USER_CM_CMD_NOTIFY
	struct cm_abi_establish { /* ABI 4 support */
		__u32 id;
	};

	struct cm_abi_establish *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_ESTABLISH, size);
	cmd->id = cm_id->handle;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	return 0;
}
544
/*
 * Notify the kernel CM of a verbs event (e.g. communication established)
 * on the connection's QP.  On ABI 4 kernels only IBV_EVENT_COMM_EST is
 * expressible, via the legacy ESTABLISH command; other events return
 * EINVAL there.  Returns 0 on success, -1 with errno set on failure.
 */
int ib_cm_notify(struct ib_cm_id *cm_id, enum ibv_event_type event)
{
	struct ib_ucm_notify *cmd;
	void *msg;
	int result;
	int size;

	if (abi_ver == 4) {
		if (event == IBV_EVENT_COMM_EST)
			return cm_establish(cm_id);
		else
			return ERR(EINVAL);
	}

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_NOTIFY, size);
	cmd->id = cm_id->handle;
	cmd->event = event;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	return 0;
}
569
cm_send_status(struct ib_cm_id * cm_id,uint32_t type,int status,void * info,uint8_t info_length,void * private_data,uint8_t private_data_len)570 static inline int cm_send_status(struct ib_cm_id *cm_id,
571 uint32_t type,
572 int status,
573 void *info,
574 uint8_t info_length,
575 void *private_data,
576 uint8_t private_data_len)
577 {
578 struct ib_ucm_info *cmd;
579 void *msg;
580 int result;
581 int size;
582
583 CM_CREATE_MSG_CMD(msg, cmd, type, size);
584 cmd->id = cm_id->handle;
585 cmd->status = status;
586
587 if (private_data && private_data_len) {
588 cmd->data = (uintptr_t) private_data;
589 cmd->data_len = private_data_len;
590 }
591
592 if (info && info_length) {
593 cmd->info = (uintptr_t) info;
594 cmd->info_len = info_length;
595 }
596
597 result = write(cm_id->device->fd, msg, size);
598 if (result != size)
599 return (result >= 0) ? ERR(ENODATA) : -1;
600
601 return 0;
602 }
603
/* Send a connection rejection (REJ) with the given reason and optional
 * additional rejection info (ARI) and private data. */
int ib_cm_send_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   uint8_t ari_length,
		   void *private_data,
		   uint8_t private_data_len)
{
	return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_REJ, reason,
			      ari, ari_length,
			      private_data, private_data_len);
}
615
/* Send an alternate-path response (APR) answering a received LAP. */
int ib_cm_send_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   uint8_t info_length,
		   void *private_data,
		   uint8_t private_data_len)
{
	return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_APR, status,
			      info, info_length,
			      private_data, private_data_len);
}
627
/*
 * Send a message-received-acknowledged (MRA), telling the peer to wait
 * up to service_timeout before retrying.  Returns 0 on success, -1 with
 * errno set on failure.
 */
int ib_cm_send_mra(struct ib_cm_id *cm_id,
		   uint8_t service_timeout,
		   void *private_data,
		   uint8_t private_data_len)
{
	struct ib_ucm_mra *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_MRA, size);
	cmd->id = cm_id->handle;
	cmd->timeout = service_timeout;

	if (private_data && private_data_len) {
		cmd->data = (uintptr_t) private_data;
		cmd->len = private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : result;

	return 0;
}
653
ib_cm_send_lap(struct ib_cm_id * cm_id,struct ibv_sa_path_rec * alternate_path,void * private_data,uint8_t private_data_len)654 int ib_cm_send_lap(struct ib_cm_id *cm_id,
655 struct ibv_sa_path_rec *alternate_path,
656 void *private_data,
657 uint8_t private_data_len)
658 {
659 struct ib_user_path_rec abi_path;
660 struct ib_ucm_lap *cmd;
661 void *msg;
662 int result;
663 int size;
664
665 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_LAP, size);
666 cmd->id = cm_id->handle;
667
668 ibv_copy_path_rec_to_kern(&abi_path, alternate_path);
669 cmd->path = (uintptr_t) &abi_path;
670
671 if (private_data && private_data_len) {
672 cmd->data = (uintptr_t) private_data;
673 cmd->len = private_data_len;
674 }
675
676 result = write(cm_id->device->fd, msg, size);
677 if (result != size)
678 return (result >= 0) ? ERR(ENODATA) : -1;
679
680 return 0;
681 }
682
/*
 * Send a service-ID resolution request (SIDR REQ) to look up the remote
 * QP serving param->service_id over param->path.  Returns 0 on success,
 * -1 with errno set on failure (EINVAL for missing param/path).
 */
int ib_cm_send_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct ib_user_path_rec abi_path;
	struct ib_ucm_sidr_req *cmd;
	void *msg;
	int result;
	int size;

	if (!param || !param->path)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REQ, size);
	cmd->id = cm_id->handle;
	cmd->sid = param->service_id;
	cmd->timeout = param->timeout_ms;
	cmd->max_cm_retries = param->max_cm_retries;

	/* marshal the path into kernel layout; passed by address */
	ibv_copy_path_rec_to_kern(&abi_path, param->path);
	cmd->path = (uintptr_t) &abi_path;

	if (param->private_data && param->private_data_len) {
		cmd->data = (uintptr_t) param->private_data;
		cmd->len = param->private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : result;

	return 0;
}
715
/*
 * Send a service-ID resolution reply (SIDR REP) answering a received
 * SIDR REQ with the QP number/QKey (or an error status).  Returns 0 on
 * success, -1 with errno set on failure (EINVAL if param is NULL).
 */
int ib_cm_send_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct ib_ucm_sidr_rep *cmd;
	void *msg;
	int result;
	int size;

	if (!param)
		return ERR(EINVAL);

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REP, size);
	cmd->id = cm_id->handle;
	cmd->qpn = param->qp_num;
	cmd->qkey = param->qkey;
	cmd->status = param->status;

	if (param->private_data && param->private_data_len) {
		cmd->data = (uintptr_t) param->private_data;
		cmd->data_len = param->private_data_len;
	}

	if (param->info && param->info_length) {
		cmd->info = (uintptr_t) param->info;
		cmd->info_len = param->info_length;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ENODATA) : -1;

	return 0;
}
749
/*
 * Unmarshal a kernel REQ event into the user-visible parameter block.
 * ureq->primary_path (and alternate_path, if non-NULL) must already
 * point at caller-allocated path records; the kernel path data is
 * copied into them.
 */
static void cm_event_req_get(struct ib_cm_req_event_param *ureq,
			     struct ib_ucm_req_event_resp *kreq)
{
	ureq->remote_ca_guid             = kreq->remote_ca_guid;
	ureq->remote_qkey                = kreq->remote_qkey;
	ureq->remote_qpn                 = kreq->remote_qpn;
	ureq->qp_type                    = kreq->qp_type;
	ureq->starting_psn               = kreq->starting_psn;
	ureq->responder_resources        = kreq->responder_resources;
	ureq->initiator_depth            = kreq->initiator_depth;
	ureq->local_cm_response_timeout  = kreq->local_cm_response_timeout;
	ureq->flow_control               = kreq->flow_control;
	ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
	ureq->retry_count                = kreq->retry_count;
	ureq->rnr_retry_count            = kreq->rnr_retry_count;
	ureq->srq                        = kreq->srq;
	ureq->port                       = kreq->port;

	ibv_copy_path_rec_from_kern(ureq->primary_path, &kreq->primary_path);
	if (ureq->alternate_path)
		ibv_copy_path_rec_from_kern(ureq->alternate_path,
					    &kreq->alternate_path);
}
773
/* Unmarshal a kernel REP event into the user-visible parameter block. */
static void cm_event_rep_get(struct ib_cm_rep_event_param *urep,
			     struct ib_ucm_rep_event_resp *krep)
{
	urep->remote_ca_guid      = krep->remote_ca_guid;
	urep->remote_qkey         = krep->remote_qkey;
	urep->remote_qpn          = krep->remote_qpn;
	urep->starting_psn        = krep->starting_psn;
	urep->responder_resources = krep->responder_resources;
	urep->initiator_depth     = krep->initiator_depth;
	urep->target_ack_delay    = krep->target_ack_delay;
	urep->failover_accepted   = krep->failover_accepted;
	urep->flow_control        = krep->flow_control;
	urep->rnr_retry_count     = krep->rnr_retry_count;
	urep->srq                 = krep->srq;
}
789
cm_event_sidr_rep_get(struct ib_cm_sidr_rep_event_param * urep,struct ib_ucm_sidr_rep_event_resp * krep)790 static void cm_event_sidr_rep_get(struct ib_cm_sidr_rep_event_param *urep,
791 struct ib_ucm_sidr_rep_event_resp *krep)
792 {
793 urep->status = krep->status;
794 urep->qkey = krep->qkey;
795 urep->qpn = krep->qpn;
796 };
797
/*
 * Block until the kernel delivers the next CM event on device, decode it
 * into a newly allocated ib_cm_event, and return it in *event.  The
 * caller owns the event (and any paths/data/info hung off it) and must
 * release everything with ib_cm_ack_event().
 *
 * For REQ and SIDR_REQ events a fresh cm_id is allocated for the new
 * connection, while the listening id is reported in the event params.
 *
 * Returns 0 on success, -1 with errno set on failure.
 */
int ib_cm_get_event(struct ib_cm_device *device, struct ib_cm_event **event)
{
	struct cm_id_private *cm_id_priv;
	struct ib_ucm_cmd_hdr *hdr;
	struct ib_ucm_event_get *cmd;
	struct ib_ucm_event_resp *resp;
	struct ib_cm_event *evt = NULL;
	struct ibv_sa_path_rec *path_a = NULL;
	struct ibv_sa_path_rec *path_b = NULL;
	void *data = NULL;
	void *info = NULL;
	void *msg;
	int result = 0;
	int size;

	if (!event)
		return ERR(EINVAL);

	/* build the EVENT command by hand (not via the macro) because the
	 * data/info buffers below need extra setup */
	size = sizeof(*hdr) + sizeof(*cmd);
	msg = alloca(size);
	if (!msg)
		return ERR(ENOMEM);

	hdr = msg;
	cmd = msg + sizeof(*hdr);

	hdr->cmd = IB_USER_CM_CMD_EVENT;
	hdr->in  = sizeof(*cmd);
	hdr->out = sizeof(*resp);

	memset(cmd, 0, sizeof(*cmd));

	resp = alloca(sizeof(*resp));
	if (!resp)
		return ERR(ENOMEM);

	cmd->response = (uintptr_t) resp;
	/* request the maximum possible private-data and info sizes (255) */
	cmd->data_len = (uint8_t)(~0U);
	cmd->info_len = (uint8_t)(~0U);

	data = malloc(cmd->data_len);
	if (!data) {
		result = ERR(ENOMEM);
		goto done;
	}

	info = malloc(cmd->info_len);
	if (!info) {
		result = ERR(ENOMEM);
		goto done;
	}

	cmd->data = (uintptr_t) data;
	cmd->info = (uintptr_t) info;

	/* blocks until an event is available */
	result = write(device->fd, msg, size);
	if (result != size) {
		result = (result >= 0) ? ERR(ENODATA) : -1;
		goto done;
	}

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	/*
	 * decode event.
	 */
	evt = malloc(sizeof(*evt));
	if (!evt) {
		result = ERR(ENOMEM);
		goto done;
	}
	memset(evt, 0, sizeof(*evt));
	/* uid is the cm_id_priv pointer we stored at create/rep time */
	evt->cm_id = (void *) (uintptr_t) resp->uid;
	evt->event = resp->event;

	if (resp->present & IB_UCM_PRES_PRIMARY) {
		path_a = malloc(sizeof(*path_a));
		if (!path_a) {
			result = ERR(ENOMEM);
			goto done;
		}
	}

	if (resp->present & IB_UCM_PRES_ALTERNATE) {
		path_b = malloc(sizeof(*path_b));
		if (!path_b) {
			result = ERR(ENOMEM);
			goto done;
		}
	}

	/* ownership of data/info/path_a/path_b transfers into evt on a
	 * per-event basis; NULLing the local keeps `done:` from freeing it */
	switch (evt->event) {
	case IB_CM_REQ_RECEIVED:
		evt->param.req_rcvd.listen_id = evt->cm_id;
		/* allocate a new id for the incoming connection */
		cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
					    evt->cm_id->context);
		if (!cm_id_priv) {
			result = ERR(ENOMEM);
			goto done;
		}
		cm_id_priv->id.handle = resp->id;
		evt->cm_id = &cm_id_priv->id;
		evt->param.req_rcvd.primary_path = path_a;
		evt->param.req_rcvd.alternate_path = path_b;
		path_a = NULL;
		path_b = NULL;
		cm_event_req_get(&evt->param.req_rcvd, &resp->u.req_resp);
		break;
	case IB_CM_REP_RECEIVED:
		cm_event_rep_get(&evt->param.rep_rcvd, &resp->u.rep_resp);
		break;
	case IB_CM_MRA_RECEIVED:
		evt->param.mra_rcvd.service_timeout = resp->u.mra_resp.timeout;
		break;
	case IB_CM_REJ_RECEIVED:
		evt->param.rej_rcvd.reason = resp->u.rej_resp.reason;
		evt->param.rej_rcvd.ari = info;
		info = NULL;
		break;
	case IB_CM_LAP_RECEIVED:
		evt->param.lap_rcvd.alternate_path = path_b;
		path_b = NULL;
		ibv_copy_path_rec_from_kern(evt->param.lap_rcvd.alternate_path,
					    &resp->u.lap_resp.path);
		break;
	case IB_CM_APR_RECEIVED:
		evt->param.apr_rcvd.ap_status = resp->u.apr_resp.status;
		evt->param.apr_rcvd.apr_info = info;
		info = NULL;
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		evt->param.sidr_req_rcvd.listen_id = evt->cm_id;
		/* allocate a new id for the incoming resolution request */
		cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
					    evt->cm_id->context);
		if (!cm_id_priv) {
			result = ERR(ENOMEM);
			goto done;
		}
		cm_id_priv->id.handle = resp->id;
		evt->cm_id = &cm_id_priv->id;
		evt->param.sidr_req_rcvd.pkey = resp->u.sidr_req_resp.pkey;
		evt->param.sidr_req_rcvd.port = resp->u.sidr_req_resp.port;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		cm_event_sidr_rep_get(&evt->param.sidr_rep_rcvd,
				      &resp->u.sidr_rep_resp);
		evt->param.sidr_rep_rcvd.info = info;
		info = NULL;
		break;
	default:
		evt->param.send_status = resp->u.send_status;
		break;
	}

	if (resp->present & IB_UCM_PRES_DATA) {
		evt->private_data = data;
		data = NULL;
	}

	*event = evt;
	evt = NULL;
	result = 0;
done:
	/* frees whatever was not handed off to the caller's event */
	if (data)
		free(data);
	if (info)
		free(info);
	if (path_a)
		free(path_a);
	if (path_b)
		free(path_b);
	if (evt)
		free(evt);

	return result;
}
974
ib_cm_ack_event(struct ib_cm_event * event)975 int ib_cm_ack_event(struct ib_cm_event *event)
976 {
977 struct cm_id_private *cm_id_priv;
978
979 if (!event)
980 return ERR(EINVAL);
981
982 if (event->private_data)
983 free(event->private_data);
984
985 cm_id_priv = container_of(event->cm_id, struct cm_id_private, id);
986
987 switch (event->event) {
988 case IB_CM_REQ_RECEIVED:
989 cm_id_priv = container_of(event->param.req_rcvd.listen_id,
990 struct cm_id_private, id);
991 free(event->param.req_rcvd.primary_path);
992 if (event->param.req_rcvd.alternate_path)
993 free(event->param.req_rcvd.alternate_path);
994 break;
995 case IB_CM_REJ_RECEIVED:
996 if (event->param.rej_rcvd.ari)
997 free(event->param.rej_rcvd.ari);
998 break;
999 case IB_CM_LAP_RECEIVED:
1000 free(event->param.lap_rcvd.alternate_path);
1001 break;
1002 case IB_CM_APR_RECEIVED:
1003 if (event->param.apr_rcvd.apr_info)
1004 free(event->param.apr_rcvd.apr_info);
1005 break;
1006 case IB_CM_SIDR_REQ_RECEIVED:
1007 cm_id_priv = container_of(event->param.sidr_req_rcvd.listen_id,
1008 struct cm_id_private, id);
1009 break;
1010 case IB_CM_SIDR_REP_RECEIVED:
1011 if (event->param.sidr_rep_rcvd.info)
1012 free(event->param.sidr_rep_rcvd.info);
1013 default:
1014 break;
1015 }
1016
1017 pthread_mutex_lock(&cm_id_priv->mut);
1018 cm_id_priv->events_completed++;
1019 pthread_cond_signal(&cm_id_priv->cond);
1020 pthread_mutex_unlock(&cm_id_priv->mut);
1021
1022 free(event);
1023 return 0;
1024 }
1025