xref: /linux/drivers/infiniband/core/verbs.c (revision 9ce7677cfd7cd871adb457c80bea3b581b839641)
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Protection domains */

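/**
 * ib_alloc_pd - Allocate an unused protection domain
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain groups resources (QPs, AHs, MRs, and so on) that
 * are allowed to work with one another.  Returns the new PD or an
 * ERR_PTR() value on failure.
 */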
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

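/**
 * ib_dealloc_pd - Deallocate a protection domain
 * @pd: The protection domain to deallocate.
 *
 * Fails with -EBUSY while any object allocated against the PD still
 * holds a reference on it.
 */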
int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

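/**
 * ib_create_ah - Create an address handle for the given address vector
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the destination address.
 *
 * The address handle is used to reference a local or global destination
 * when posting sends on a UD QP.
 */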
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

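/**
 * ib_create_ah_from_wc - Create an address handle from a work completion
 * @pd: The protection domain associated with the address handle.
 * @wc: A work completion for a message received on a UD QP.
 * @grh: The global route header received with the message, used when
 *   @wc indicates that a GRH is present.
 * @port_num: The port on which the message arrived.
 *
 * The resulting address handle addresses the sender of the received
 * message, so it can be used to post a reply (the MAD layer is one such
 * user).  A sketch of a hypothetical caller, assuming the GRH sits at
 * the start of the UD receive buffer:
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, recv_buf, port_num);
 */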
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = wc->slid;
	ah_attr.sl = wc->sl;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr.ah_flags = IB_AH_GRH;
		/*
		 * The reply goes back to the sender, so the destination
		 * GID of the new AH is the source GID of the received
		 * GRH, and the local GID index is looked up from the
		 * GID the packet was addressed to.
		 */
		ah_attr.grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ERR_PTR(ret);

		ah_attr.grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
		ah_attr.grh.hop_limit = grh->hop_limit;
	}

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

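/**
 * ib_create_srq - Create a shared receive queue
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: The requested attributes of the SRQ.
 *
 * Returns ERR_PTR(-ENOSYS) if the device does not support SRQs.
 */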
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

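/**
 * ib_create_qp - Create a queue pair
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: The initial attributes (CQs, capabilities, QP type,
 *   optional SRQ) used to create the QP.
 *
 * On success the new QP takes references on the PD, both CQs, and the
 * SRQ if one was given.
 */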
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->uobject       = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

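/**
 * ib_modify_qp - Modify the attributes of a queue pair
 * @qp: The QP to modify.
 * @qp_attr: The attributes to apply, including the new state if
 *   IB_QP_STATE is set in @qp_attr_mask.
 * @qp_attr_mask: A bit-mask of the attributes in @qp_attr to change.
 */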
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

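/**
 * ib_create_cq - Create a completion queue
 * @device: The device on which to create the CQ.
 * @comp_handler: Handler called when a completion event occurs.
 * @event_handler: Handler called for asynchronous events on the CQ.
 * @cq_context: Caller context passed back to both handlers.
 * @cqe: The minimum number of entries the CQ must hold.
 *
 * A minimal sketch of a caller (my_comp_handler and my_ctx are
 * placeholders, not part of this file):
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */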
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

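/**
 * ib_get_dma_mr - Return a memory region usable for DMA to/from system memory
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: The desired IB_ACCESS_* rights for the region.
 */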
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

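/**
 * ib_rereg_phys_mr - Modify an existing physical memory region
 * @mr: The memory region to modify.
 * @mr_rereg_mask: IB_MR_REREG_* flags selecting which attributes change.
 * @pd: The new protection domain, if IB_MR_REREG_PD is set.
 * @phys_buf_array: The new buffer list, if IB_MR_REREG_TRANS is set.
 * @num_phys_buf: The number of entries in @phys_buf_array.
 * @mr_access_flags: The new access rights, if IB_MR_REREG_ACCESS is set.
 * @iova_start: The new virtual start address of the region.
 *
 * Fails with -EBUSY while the region is still in use (mr->usecnt != 0)
 * and with -ENOSYS if the device does not implement re-registration.
 */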
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

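/**
 * ib_alloc_mw - Allocate a memory window
 * @pd: The protection domain associated with the memory window.
 *
 * Returns ERR_PTR(-ENOSYS) if the device does not support memory
 * windows.
 */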
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

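/**
 * ib_alloc_fmr - Allocate an unmapped fast memory region
 * @pd: The protection domain associated with the FMR.
 * @mr_access_flags: The desired access rights for the region.
 * @fmr_attr: The attributes (maximum pages, maximum map operations) of
 *   the region.
 *
 * A fast memory region can be mapped and unmapped repeatedly without
 * paying the cost of a full registration each time.
 */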
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

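/**
 * ib_unmap_fmr - Remove the mappings from a list of fast memory regions
 * @fmr_list: The list of FMRs to unmap.
 *
 * The whole list is passed to the device of the first FMR on it, so
 * every FMR on the list is expected to belong to the same device.
 */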
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

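/**
 * ib_attach_mcast - Attach a QP to a multicast group
 * @qp: The QP to attach; it must be of type IB_QPT_UD.
 * @gid: The GID of the multicast group, which must be a multicast GID
 *   (first byte 0xff).
 * @lid: The LID of the multicast group.
 */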
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);