/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/err.h>

#include <ib_verbs.h>

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
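
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical protection-domain lifetime as seen from a consumer.
 * "my_device" is an assumed ib_device pointer, e.g. the one a real
 * client receives in its ib_client add callback.
 */
static int example_pd_usage(struct ib_device *my_device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(my_device);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create CQs, QPs, MRs, etc. against pd ... */

	/* Fails with -EBUSY while any object still holds a reference. */
	return ib_dealloc_pd(pd);
}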

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
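
/*
 * Example (illustrative sketch, not part of the original file): building
 * an address handle for a simple LID-routed destination.  The dlid and
 * port parameters are assumed inputs, e.g. taken from a path record;
 * memset() needs <linux/string.h> in a standalone module.
 */
static struct ib_ah *example_create_ah(struct ib_pd *pd, u16 dlid, u8 port)
{
	struct ib_ah_attr ah_attr;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = dlid;	/* destination LID */
	ah_attr.sl       = 0;		/* service level */
	ah_attr.port_num = port;	/* local port to send from */

	return ib_create_ah(pd, &ah_attr);
}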

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device        = pd->device;
		qp->pd            = pd;
		qp->send_cq       = qp_init_attr->send_cq;
		qp->recv_cq       = qp_init_attr->recv_cq;
		qp->srq           = qp_init_attr->srq;
		qp->uobject       = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context    = qp_init_attr->qp_context;
		qp->qp_type       = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
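
/*
 * Example (illustrative sketch, not part of the original file): filling
 * in ib_qp_init_attr for a small RC QP.  The capacity numbers are
 * arbitrary assumptions; send and receive work may share a single CQ.
 */
static struct ib_qp *example_create_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr;

	memset(&init_attr, 0, sizeof init_attr);
	init_attr.send_cq          = cq;
	init_attr.recv_cq          = cq;
	init_attr.qp_type          = IB_QPT_RC;
	init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_send_wr  = 16;
	init_attr.cap.max_recv_wr  = 16;
	init_attr.cap.max_send_sge = 1;
	init_attr.cap.max_recv_sge = 1;

	return ib_create_qp(pd, &init_attr);
}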

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);
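
/*
 * Example (illustrative sketch, not part of the original file): the
 * first step of the RESET -> INIT -> RTR -> RTS bring-up sequence.
 * The mask tells the driver which attributes are valid for this
 * transition; the pkey index and access flags here are assumptions.
 */
static int example_qp_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr;

	memset(&attr, 0, sizeof attr);
	attr.qp_state        = IB_QPS_INIT;
	attr.pkey_index      = 0;
	attr.port_num        = port;
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT  | IB_QP_ACCESS_FLAGS);
}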

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
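
/*
 * Example (illustrative sketch, not part of the original file): a CQ
 * with a completion callback.  The handler only fires after the CQ is
 * armed with ib_req_notify_cq(); the depth of 64 CQEs is an arbitrary
 * assumption.
 */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	/* Drain completions, then re-arm for the next event. */
	while (ib_poll_cq(cq, 1, &wc) > 0)
		/* ... handle wc.status, wc.wr_id ... */;
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

static struct ib_cq *example_create_cq(struct ib_device *device, void *ctx)
{
	return ib_create_cq(device, example_comp_handler, NULL, ctx, 64);
}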

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	int ret;

	if (!cq->device->resize_cq)
		return -ENOSYS;

	ret = cq->device->resize_cq(cq, &cqe);
	if (!ret)
		cq->cqe = cqe;

	return ret;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
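
/*
 * Example (illustrative sketch, not part of the original file): a DMA
 * MR covering the device's whole DMA address space.  The resulting
 * mr->lkey is what a consumer places in its scatter/gather entries.
 */
static struct ib_mr *example_dma_mr(struct ib_pd *pd)
{
	return ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
}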

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
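
/*
 * Example (illustrative sketch, not part of the original file): one
 * allocate/map/unmap/free cycle for a fast memory region.  fmr_attr,
 * page_list, npages and iova are assumed inputs; ib_map_phys_fmr() is
 * the inline wrapper from <ib_verbs.h>, and LIST_HEAD needs
 * <linux/list.h>.
 */
static int example_fmr_cycle(struct ib_pd *pd, struct ib_fmr_attr *fmr_attr,
			     u64 *page_list, int npages, u64 iova)
{
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
	if (!ret) {
		/* Unmaps are batched: every FMR on the list is flushed
		 * with a single device call. */
		list_add_tail(&fmr->list, &fmr_list);
		ib_unmap_fmr(&fmr_list);
	}

	return ib_dealloc_fmr(fmr);
}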

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	return qp->device->attach_mcast ?
		qp->device->attach_mcast(qp, gid, lid) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	return qp->device->detach_mcast ?
		qp->device->detach_mcast(qp, gid, lid) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_detach_mcast);
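
/*
 * Example (illustrative sketch, not part of the original file): joining
 * and leaving a multicast group on a UD QP.  The group GID and LID are
 * assumed inputs, typically taken from an SA MCMemberRecord.
 */
static int example_mcast(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);
	if (ret)
		return ret;

	/* ... receive multicast traffic on qp ... */

	return ib_detach_mcast(qp, mgid, mlid);
}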
443