/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/*
 * IB encodes link speed as a multiple of the 2.5 Gb/s base rate.
 * These helpers convert between the ib_rate enum and that multiplier;
 * unrecognized values map to -1 and IB_RATE_PORT_CURRENT respectively.
 */
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
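
/*
 * Illustrative sketch (not part of the original file): the two helpers
 * above are inverses for every defined rate.  A 4X SDR link runs at
 * 10 Gb/s, i.e. four times the 2.5 Gb/s base rate.
 */
static int example_rate_round_trip(void)
{
	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);	/* 4 */

	return mult_to_ib_rate(mult) == IB_RATE_10_GBPS ? 0 : -EINVAL;
}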

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
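
/*
 * Illustrative sketch (not part of the original file): the usual PD
 * life cycle.  ib_alloc_pd() returns an ERR_PTR() on failure, so the
 * result must be checked with IS_ERR() rather than against NULL, and
 * ib_dealloc_pd() fails with -EBUSY while any object created on the
 * PD (AH, QP, SRQ, MR, MW, FMR) still holds a reference on it.
 */
static int example_pd_lifecycle(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... create QPs, MRs, etc. against pd, then tear them down ... */

	return ib_dealloc_pd(pd);
}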

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

/*
 * Initialize an ib_ah_attr so that it addresses the sender of a
 * received packet: the destination LID/GID of the new address vector
 * come from the source fields of the work completion and GRH.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = grh->hop_limit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
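
/*
 * Illustrative sketch (not part of the original file): a UD responder
 * turning a completed receive into an address handle for the reply.
 * For UD QPs the GRH, when present, occupies the first 40 bytes of the
 * posted receive buffer; "recv_buf" is a hypothetical pointer to that
 * buffer.  ib_init_ah_from_wc() only reads the GRH when IB_WC_GRH is
 * set in wc->wc_flags.
 */
static struct ib_ah *example_reply_ah(struct ib_pd *pd, struct ib_wc *wc,
				      void *recv_buf, u8 port_num)
{
	struct ib_grh *grh = recv_buf;

	return ib_create_ah_from_wc(pd, wc, grh, port_num);
}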

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
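
/*
 * Illustrative sketch (not part of the original file): creating an SRQ
 * to be shared by several QPs.  The limits here are arbitrary; real
 * callers should size them against the device's max_srq_wr and
 * max_srq_sge as reported by ib_query_device().
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.attr = {
			.max_wr	 = 128,	/* outstanding receive WRs */
			.max_sge = 1,	/* scatter entries per WR */
		},
	};

	return ib_create_srq(pd, &init_attr);
}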

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
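
/*
 * Illustrative sketch (not part of the original file): minimal
 * ib_qp_init_attr setup for an RC QP.  The capacities are arbitrary
 * and must respect the device limits reported by ib_query_device().
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
					  struct ib_cq *send_cq,
					  struct ib_cq *recv_cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = send_cq,
		.recv_cq     = recv_cq,
		.cap	     = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}

/*
 * The table below encodes the QP state machine from the IB spec: for
 * each (current state, next state) pair it records whether the
 * transition is legal and, per QP type, which attribute mask bits are
 * required and which are optional for ib_modify_qp().
 */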

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

/*
 * Check a modify QP command against the table above: the transition
 * itself must be valid, all attributes required for this QP type must
 * be set, and no attribute outside the required and optional sets may
 * be set.  Returns 1 if the command is OK, 0 if not.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	/* IB_QP_CUR_STATE is only allowed from RTR, RTS, SQD or SQE. */
	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}
EXPORT_SYMBOL(ib_modify_qp);
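
/*
 * Illustrative sketch (not part of the original file): moving a fresh
 * RC QP from RESET to INIT.  Per the state table, an RC QP requires
 * IB_QP_PKEY_INDEX, IB_QP_PORT and IB_QP_ACCESS_FLAGS for this
 * transition.  ib_modify_qp() above dispatches straight to the driver,
 * so the sketch validates the mask itself first; the pkey index and
 * access flags chosen here are arbitrary.
 */
static int example_qp_reset_to_init(struct ib_qp *qp, u8 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ |
				   IB_ACCESS_REMOTE_WRITE,
	};
	int mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
		   IB_QP_ACCESS_FLAGS;

	if (!ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, qp->qp_type, mask))
		return -EINVAL;

	return ib_modify_qp(qp, &attr, mask);
}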

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
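
/*
 * Illustrative sketch (not part of the original file): creating a CQ
 * and arming it.  "my_comp_handler" is a hypothetical callback,
 * typically invoked from the driver's interrupt path; completion
 * events are only generated after ib_req_notify_cq() arms the CQ.
 */
static void my_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* typically: poll with ib_poll_cq(), then re-arm the CQ */
}

static struct ib_cq *example_create_cq(struct ib_device *device, void *ctx)
{
	struct ib_cq *cq;

	cq = ib_create_cq(device, my_comp_handler, NULL, ctx, 256);
	if (IS_ERR(cq))
		return cq;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	return cq;
}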

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
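
/*
 * Illustrative sketch (not part of the original file): getting a DMA
 * MR and using its lkey in a scatter/gather entry.  "dma_addr" and
 * "len" are hypothetical; the address must come from the DMA mapping
 * API, not from kmalloc() directly.
 */
static struct ib_mr *example_dma_mr_sge(struct ib_pd *pd, u64 dma_addr,
					u32 len, struct ib_sge *sge)
{
	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);

	if (IS_ERR(mr))
		return mr;

	sge->addr   = dma_addr;	/* must be a DMA-mapped address */
	sge->length = len;
	sge->lkey   = mr->lkey;
	return mr;		/* caller releases with ib_dereg_mr() */
}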

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	/* On a successful PD change, move the reference to the new PD. */
	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);
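
/*
 * Illustrative sketch (not part of the original file): binding a
 * memory window to part of a registered region via a QP.  The struct
 * ib_mw_bind field names here are assumptions based on
 * <rdma/ib_verbs.h> of this era; "wr_id" is an arbitrary cookie
 * returned in the bind completion.
 */
static int example_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
			   struct ib_mr *mr, u64 addr, u32 length)
{
	struct ib_mw_bind bind = {
		.mr		 = mr,
		.wr_id		 = 1,
		.addr		 = addr,
		.length		 = length,
		.mw_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_bind_mw(qp, mw, &bind);
}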

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

/*
 * All FMRs on the list must belong to the same device, so the batched
 * unmap can be handed to the first entry's driver in a single call.
 */
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
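
/*
 * Illustrative sketch (not part of the original file): the FMR life
 * cycle.  The ib_fmr_attr field names (max_pages, max_maps,
 * page_shift) and values are assumptions for this era of
 * <rdma/ib_verbs.h>; ib_map_phys_fmr() takes a page list prepared by
 * the caller, and unmapping is batched through a list so its cost can
 * be amortized across many FMRs.
 */
static int example_fmr_cycle(struct ib_pd *pd, u64 *page_list, int npages,
			     u64 iova)
{
	struct ib_fmr_attr fmr_attr = {
		.max_pages  = 64,
		.max_maps   = 32,
		.page_shift = PAGE_SHIFT,
	};
	struct ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret;

	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
	if (!ret) {
		list_add_tail(&fmr->list, &fmr_list);
		ret = ib_unmap_fmr(&fmr_list);
	}

	return ib_dealloc_fmr(fmr) ?: ret;
}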

/* Multicast groups */

/*
 * Only UD QPs may join multicast groups, and the GID must be a
 * multicast GID (per the IB spec, multicast GIDs begin with 0xff).
 * The same checks apply to both attach and detach.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
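
/*
 * Illustrative sketch (not part of the original file): joining and
 * leaving a multicast group.  Real consumers obtain the group's GID
 * and LID from the SA (e.g. via an ib_sa_mcmember_rec_query()); the
 * arguments here only show the required shape of the call.
 */
static int example_mcast_join(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);	/* qp must be IB_QPT_UD */
	if (ret)
		return ret;

	/* ... send and receive on the group ... */

	return ib_detach_mcast(qp, mgid, mlid);
}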
835