xref: /linux/drivers/infiniband/core/verbs.c (revision 858259cf7d1c443c836a2022b78cb281f0a9b95e)
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Protection domains */

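/*
 * Allocate a protection domain for in-kernel use (no associated user
 * object).  The PD's reference count starts at zero and is incremented
 * for every AH, SRQ, QP, MR, MW and FMR created against it.
 */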
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

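/*
 * Free a protection domain.  Fails with -EBUSY while any object created
 * against the PD still holds a reference to it.
 */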
int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

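/*
 * Build an address handle that can be used to reply to the sender of the
 * work completion @wc received on @port_num.  When IB_WC_GRH is set in
 * wc->wc_flags, @grh must point at the GRH of the received packet.
 */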
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = wc->slid;
	ah_attr.sl = wc->sl;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr.ah_flags = IB_AH_GRH;
		/*
		 * The reply is routed back to the sender's source GID; our
		 * own GID (the destination GID of the received GRH) is
		 * looked up in the cache to find the local sgid_index.
		 */
		ah_attr.grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(pd->device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ERR_PTR(ret);

		ah_attr.grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr.grh.flow_label = flow_class & 0xFFFFF;
		ah_attr.grh.traffic_class = (flow_class >> 20) & 0xFF;
		ah_attr.grh.hop_limit = grh->hop_limit;
	}

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

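/*
 * Create a shared receive queue on @pd.  SRQs are an optional feature,
 * so -ENOSYS is returned if the device does not implement them.  On
 * success the SRQ holds a reference on the PD.
 */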
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device        = pd->device;
		srq->pd            = pd;
		srq->uobject       = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

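/*
 * Create a queue pair on @pd.  On success the QP takes a reference on
 * the PD, the send and receive CQs and, if one is used, the SRQ; the
 * references are dropped again by ib_destroy_qp().
 */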
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->uobject       = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

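/*
 * Resize a completion queue.  The provider may round the requested
 * number of entries up; cq->cqe is updated to the value actually
 * allocated on success.
 */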
int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	int ret;

	if (!cq->device->resize_cq)
		return -ENOSYS;

	ret = cq->device->resize_cq(cq, &cqe);
	if (!ret)
		cq->cqe = cqe;

	return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

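/*
 * Return a memory region for system memory that is usable for DMA by
 * @pd's device, with access rights @mr_access_flags.
 */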
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

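/*
 * Modify the attributes of an existing physical memory region: depending
 * on the bits set in @mr_rereg_mask, the translation, access flags and/or
 * protection domain are changed in place.  Fails with -EBUSY while the
 * region is still in use (mr->usecnt != 0).
 */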
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

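/*
 * Allocate a memory window on @pd.  Memory windows are optional, so
 * -ENOSYS is returned if the device has no alloc_mw method.  On success
 * the MW holds a reference on the PD.
 */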
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

474 /* "Fast" memory regions */
475 
476 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
477 			    int mr_access_flags,
478 			    struct ib_fmr_attr *fmr_attr)
479 {
480 	struct ib_fmr *fmr;
481 
482 	if (!pd->device->alloc_fmr)
483 		return ERR_PTR(-ENOSYS);
484 
485 	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
486 	if (!IS_ERR(fmr)) {
487 		fmr->device = pd->device;
488 		fmr->pd     = pd;
489 		atomic_inc(&pd->usecnt);
490 	}
491 
492 	return fmr;
493 }
494 EXPORT_SYMBOL(ib_alloc_fmr);
495 
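/*
 * Unmap a list of fast memory regions in a single call.  All FMRs on
 * @fmr_list must belong to the same device; the whole list is handed to
 * that device's unmap_fmr method.
 */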
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

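/*
 * Attach a QP to, or detach it from, a multicast group.  Only UD QPs may
 * be attached, and @gid must be a multicast GID (its top byte is 0xff);
 * anything else is rejected with -EINVAL.
 */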
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
545