/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define _GNU_SOURCE
#include <config.h>

#include <infiniband/endian.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>

#include "ibverbs.h"

/* Hack to avoid GCC's -Wmissing-prototypes and the similar error from sparse
   with these prototypes.  Symbol versioning requires the goofy names; the
   prototypes must match the versions in verbs.h.
 */
struct ibv_device **__ibv_get_device_list(int *num_devices);
void __ibv_free_device_list(struct ibv_device **list);
const char *__ibv_get_device_name(struct ibv_device *device);
__be64 __ibv_get_device_guid(struct ibv_device *device);
struct ibv_context *__ibv_open_device(struct ibv_device *device);
int __ibv_close_device(struct ibv_context *context);
int __ibv_get_async_event(struct ibv_context *context,
			  struct ibv_async_event *event);
void __ibv_ack_async_event(struct ibv_async_event *event);

static pthread_once_t device_list_once = PTHREAD_ONCE_INIT;
static int num_devices;
static struct ibv_device **device_list;

static void count_devices(void)
{
	num_devices = ibverbs_init(&device_list);
}

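/*
 * Return a NULL-terminated copy of the device list discovered by
 * ibverbs_init().  The caller owns the returned array and must release
 * it with ibv_free_device_list().
 */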
struct ibv_device **__ibv_get_device_list(int *num)
{
	struct ibv_device **l;
	int i;

	if (num)
		*num = 0;

	pthread_once(&device_list_once, count_devices);

	if (num_devices < 0) {
		errno = -num_devices;
		return NULL;
	}

	l = calloc(num_devices + 1, sizeof (struct ibv_device *));
	if (!l) {
		errno = ENOMEM;
		return NULL;
	}

	for (i = 0; i < num_devices; ++i)
		l[i] = device_list[i];
	if (num)
		*num = num_devices;

	return l;
}
default_symver(__ibv_get_device_list, ibv_get_device_list);

void __ibv_free_device_list(struct ibv_device **list)
{
	free(list);
}
default_symver(__ibv_free_device_list, ibv_free_device_list);

const char *__ibv_get_device_name(struct ibv_device *device)
{
	return device->name;
}
default_symver(__ibv_get_device_name, ibv_get_device_name);

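/*
 * Read the device's node_guid sysfs attribute ("xxxx:xxxx:xxxx:xxxx")
 * and return the GUID in network (big-endian) byte order, or 0 if the
 * attribute cannot be read or parsed.
 */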
__be64 __ibv_get_device_guid(struct ibv_device *device)
{
	char attr[24];
	uint64_t guid = 0;
	uint16_t parts[4];
	int i;

	if (ibv_read_sysfs_file(device->ibdev_path, "node_guid",
				attr, sizeof attr) < 0)
		return 0;

	if (sscanf(attr, "%hx:%hx:%hx:%hx",
		   parts, parts + 1, parts + 2, parts + 3) != 4)
		return 0;

	for (i = 0; i < 4; ++i)
		guid = (guid << 16) | parts[i];

	return htobe64(guid);
}
default_symver(__ibv_get_device_guid, ibv_get_device_guid);

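/*
 * Initialize the CQ fields that are common to all providers: the
 * context pointers, the mutex/condvar used for event accounting, and
 * the completion channel reference count.
 */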
int verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
		  struct ibv_comp_channel *channel,
		  void *cq_context)
{
	int err = 0;

	cq->context		   = context;
	cq->channel		   = channel;

	err = pthread_mutex_init(&cq->mutex, NULL);
	if (err)
		return err;
	err = pthread_cond_init(&cq->cond, NULL);
	if (err)
		goto err;

	if (cq->channel) {
		pthread_mutex_lock(&context->mutex);
		++cq->channel->refcnt;
		pthread_mutex_unlock(&context->mutex);
	}

	cq->cq_context		   = cq_context;
	cq->comp_events_completed  = 0;
	cq->async_events_completed = 0;

	return err;

err:
	pthread_mutex_destroy(&cq->mutex);

	return err;
}

void verbs_cleanup_cq(struct ibv_cq *cq)
{
	pthread_cond_destroy(&cq->cond);
	pthread_mutex_destroy(&cq->mutex);
}

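/*
 * Library wrapper around the provider's create_cq_ex: reject WC flags
 * the library does not know about, then initialize the common CQ
 * fields on the CQ the provider created.
 */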
static struct ibv_cq_ex *
__lib_ibv_create_cq_ex(struct ibv_context *context,
		       struct ibv_cq_init_attr_ex *cq_attr)
{
	struct verbs_context *vctx = verbs_get_ctx(context);
	struct ibv_cq_ex *cq;
	int err = 0;

	if (cq_attr->wc_flags & ~IBV_CREATE_CQ_SUP_WC_FLAGS) {
		errno = EOPNOTSUPP;
		return NULL;
	}

	cq = vctx->priv->create_cq_ex(context, cq_attr);
	if (!cq)
		return NULL;

	err = verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
			    cq_attr->channel, cq_attr->cq_context);
	if (err)
		goto err;

	return cq;

err:
	context->ops.destroy_cq(ibv_cq_ex_to_cq(cq));

	return NULL;
}

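/*
 * Open the device's command file and let the provider set up a context
 * for it.  Providers that implement init_context() get a library-allocated
 * extended context (struct verbs_context); older providers allocate the
 * context themselves via alloc_context().
 */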
struct ibv_context *__ibv_open_device(struct ibv_device *device)
{
	struct verbs_device *verbs_device = verbs_get_device(device);
	char *devpath;
	int cmd_fd, ret;
	struct ibv_context *context;
	struct verbs_context *context_ex;

	if (asprintf(&devpath, "/dev/%s", device->dev_name) < 0)
		return NULL;

	/*
	 * We'll only be doing writes, but we need O_RDWR in case the
	 * provider needs to mmap() the file.
	 */
	cmd_fd = open(devpath, O_RDWR | O_CLOEXEC);
	free(devpath);

	if (cmd_fd < 0)
		return NULL;

	if (!verbs_device->ops->init_context) {
		context = verbs_device->ops->alloc_context(device, cmd_fd);
		if (!context)
			goto err;

		if (pthread_mutex_init(&context->mutex, NULL)) {
			verbs_device->ops->free_context(context);
			goto err;
		}
	} else {
		struct verbs_ex_private *priv;

		/* Library now allocates the context */
		context_ex = calloc(1, sizeof(*context_ex) +
				    verbs_device->size_of_context);
		if (!context_ex) {
			errno = ENOMEM;
			goto err;
		}

		priv = calloc(1, sizeof(*priv));
		if (!priv) {
			errno = ENOMEM;
			goto err_context;
		}

		context_ex->priv = priv;
		context_ex->context.abi_compat  = __VERBS_ABI_IS_EXTENDED;
		context_ex->sz = sizeof(*context_ex);

		context = &context_ex->context;
		if (pthread_mutex_init(&context->mutex, NULL))
			goto verbs_err;

		ret = verbs_device->ops->init_context(verbs_device, context, cmd_fd);
		if (ret)
			goto err_mutex;
		/*
		 * In order to maintain backward/forward binary compatibility
		 * with apps compiled against libibverbs-1.1.8 that use the
		 * flow steering addition, we need to set the two
		 * ABI_placeholder entries to match the driver set flow
		 * entries.  This is because apps compiled against
		 * libibverbs-1.1.8 use an inline ibv_create_flow and
		 * ibv_destroy_flow function that looks in the placeholder
		 * spots for the proper entry points.  For apps compiled
		 * against libibverbs-1.1.9 and later, the inline functions
		 * will be looking in the right place.
		 */
		context_ex->ABI_placeholder1 = (void (*)(void)) context_ex->ibv_create_flow;
		context_ex->ABI_placeholder2 = (void (*)(void)) context_ex->ibv_destroy_flow;

		if (context_ex->create_cq_ex) {
			priv->create_cq_ex = context_ex->create_cq_ex;
			context_ex->create_cq_ex = __lib_ibv_create_cq_ex;
		}
	}

	context->device = device;
	context->cmd_fd = cmd_fd;

	return context;

err_mutex:
	pthread_mutex_destroy(&context->mutex);
verbs_err:
	free(context_ex->priv);
err_context:
	free(context_ex);
err:
	close(cmd_fd);
	return NULL;
}
default_symver(__ibv_open_device, ibv_open_device);

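/*
 * Tear down a context: release the provider state, free the context
 * and close the command and async event file descriptors.
 */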
int __ibv_close_device(struct ibv_context *context)
{
	int async_fd = context->async_fd;
	int cmd_fd   = context->cmd_fd;
	struct verbs_context *context_ex;
	struct verbs_device *verbs_device = verbs_get_device(context->device);

	pthread_mutex_destroy(&context->mutex);
	context_ex = verbs_get_ctx(context);
	if (context_ex) {
		verbs_device->ops->uninit_context(verbs_device, context);
		free(context_ex->priv);
		free(context_ex);
	} else {
		verbs_device->ops->free_context(context);
	}

	close(async_fd);
	close(cmd_fd);

	return 0;
}
default_symver(__ibv_close_device, ibv_close_device);

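/*
 * Read one asynchronous event from the kernel and translate the opaque
 * element into the matching CQ/QP/SRQ/WQ pointer or port number.
 */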
int __ibv_get_async_event(struct ibv_context *context,
			  struct ibv_async_event *event)
{
	struct ibv_kern_async_event ev;

	if (read(context->async_fd, &ev, sizeof ev) != sizeof ev)
		return -1;

	event->event_type = ev.event_type;

	switch (event->event_type) {
	case IBV_EVENT_CQ_ERR:
		event->element.cq = (void *) (uintptr_t) ev.element;
		break;

	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_PATH_MIG_ERR:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
		event->element.qp = (void *) (uintptr_t) ev.element;
		break;

	case IBV_EVENT_SRQ_ERR:
	case IBV_EVENT_SRQ_LIMIT_REACHED:
		event->element.srq = (void *) (uintptr_t) ev.element;
		break;

	case IBV_EVENT_WQ_FATAL:
		event->element.wq = (void *) (uintptr_t) ev.element;
		break;
	default:
		event->element.port_num = ev.element;
		break;
	}

	if (context->ops.async_event)
		context->ops.async_event(event);

	return 0;
}
default_symver(__ibv_get_async_event, ibv_get_async_event);

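/*
 * Acknowledge an asynchronous event: bump the per-object completion
 * counter and signal the object's condition variable so that code
 * waiting for outstanding events on that CQ/QP/SRQ/WQ can proceed.
 */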
void __ibv_ack_async_event(struct ibv_async_event *event)
{
	switch (event->event_type) {
	case IBV_EVENT_CQ_ERR:
	{
		struct ibv_cq *cq = event->element.cq;

		pthread_mutex_lock(&cq->mutex);
		++cq->async_events_completed;
		pthread_cond_signal(&cq->cond);
		pthread_mutex_unlock(&cq->mutex);

		return;
	}

	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_PATH_MIG_ERR:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
	{
		struct ibv_qp *qp = event->element.qp;

		pthread_mutex_lock(&qp->mutex);
		++qp->events_completed;
		pthread_cond_signal(&qp->cond);
		pthread_mutex_unlock(&qp->mutex);

		return;
	}

	case IBV_EVENT_SRQ_ERR:
	case IBV_EVENT_SRQ_LIMIT_REACHED:
	{
		struct ibv_srq *srq = event->element.srq;

		pthread_mutex_lock(&srq->mutex);
		++srq->events_completed;
		pthread_cond_signal(&srq->cond);
		pthread_mutex_unlock(&srq->mutex);

		return;
	}

	case IBV_EVENT_WQ_FATAL:
	{
		struct ibv_wq *wq = event->element.wq;

		pthread_mutex_lock(&wq->mutex);
		++wq->events_completed;
		pthread_cond_signal(&wq->cond);
		pthread_mutex_unlock(&wq->mutex);

		return;
	}

	default:
		return;
	}
}
default_symver(__ibv_ack_async_event, ibv_ack_async_event);

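/*
 * Initialize the event accounting fields of a work queue: the counter,
 * mutex and condition variable used by the async event machinery.
 */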
int __ibv_init_wq(struct ibv_wq *wq)
{
	int err = 0;

	wq->events_completed = 0;
	err = pthread_mutex_init(&wq->mutex, NULL);
	if (err)
		return err;

	err = pthread_cond_init(&wq->cond, NULL);
	if (err)
		goto err;

	return err;

err:
	pthread_mutex_destroy(&wq->mutex);

	return err;
}
default_symver(__ibv_init_wq, ibv_init_wq);

void __ibv_cleanup_wq(struct ibv_wq *wq)
{
	pthread_cond_destroy(&wq->cond);
	pthread_mutex_destroy(&wq->mutex);
}
default_symver(__ibv_cleanup_wq, ibv_cleanup_wq);