/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

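/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 *   base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS is
 *   converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.  Rates not
 *   covered by the table are reported as -1.
 * @rate: rate to convert.
 */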
int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

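/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to the
 *   corresponding IB rate enum; the inverse of ib_rate_to_mult().
 *   Unrecognized multiples map to IB_RATE_PORT_CURRENT.
 * @mult: multiple to convert.
 */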
enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

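/*
 * Map an RDMA node type to its transport: IB CAs, switches and routers
 * use the InfiniBand transport, while RNICs use iWARP.  Any other node
 * type indicates a kernel bug, hence the BUG().
 */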
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

/* Protection domains */

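/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and windows.
 */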
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

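/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * Fails with -EBUSY while any object allocated within the protection
 * domain still holds a reference to it.
 */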
int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

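/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */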
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

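/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */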
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

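/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */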
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
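
/*
 * Example (illustrative sketch only, not built as part of this file):
 * a typical way to reply to a received UD datagram.  "my_pd", "wc",
 * "grh" and "port" are hypothetical names standing in for the caller's
 * receive-path state.
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(my_pd, &wc, grh, port);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *
 *	... post sends addressed with ah, then release it ...
 *
 *	ib_destroy_ah(ah);
 */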

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

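/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 *
 * On success, drops the reference the handle held on its protection
 * domain.
 */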
int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

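/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * Returns ERR_PTR(-ENOSYS) if the device does not support SRQs.
 */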
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

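/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */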
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

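/*
 * qp_state_table encodes the QP state machine from the IB spec: for
 * each (current state, next state) pair it records whether the
 * transition is legal and, per QP type, which attributes are required
 * and which are optional for that transition.  ib_modify_qp_is_ok()
 * below checks a caller-supplied attribute mask against this table.
 */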
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
						IB_QP_PORT			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV			|
						IB_QP_PATH_MTU			|
						IB_QP_DEST_QPN			|
						IB_QP_RQ_PSN			|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PKEY_INDEX),
				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_SQ_PSN			|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						 IB_QP_ALT_PATH			|
						 IB_QP_ACCESS_FLAGS		|
						 IB_QP_MIN_RNR_TIMER		|
						 IB_QP_PATH_MIG_STATE),
				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						 IB_QP_QKEY),
			 }
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_ALT_PATH			|
						IB_QP_PATH_MIG_STATE		|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV			|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT			|
						IB_QP_AV			|
						IB_QP_TIMEOUT			|
						IB_QP_RETRY_CNT			|
						IB_QP_RNR_RETRY			|
						IB_QP_MAX_QP_RD_ATOMIC		|
						IB_QP_MAX_DEST_RD_ATOMIC	|
						IB_QP_ALT_PATH			|
						IB_QP_ACCESS_FLAGS		|
						IB_QP_PKEY_INDEX		|
						IB_QP_MIN_RNR_TIMER		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR] =   { .valid = 1 }
	}
};

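/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */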
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
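
/*
 * Example (illustrative sketch only, not built as part of this file):
 * how a low-level driver's modify_qp method might validate its input
 * with ib_modify_qp_is_ok().  "qp", "attr" and "attr_mask" are the
 * method's parameters; "my_qp->state" is a hypothetical name for the
 * driver's cached QP state, which struct ib_qp itself does not track.
 *
 *	enum ib_qp_state cur_state, new_state;
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ?
 *		attr->cur_qp_state : my_qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, qp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */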

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

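/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */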
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

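/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 */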
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

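/**
 * ib_alloc_fast_reg_mr - Allocates a memory region usable with the
 *   IB_WR_FAST_REG_MR send work request.
 * @pd: The protection domain associated with the region.
 * @max_page_list_len: requested max physical buffer list length to be
 *   used with fast register work requests for this MR.
 */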
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

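/*
 * Multicast attach/detach is only defined for UD QPs, and the group
 * GID must be a multicast GID, which the IB spec requires to begin
 * with the 0xff prefix byte; both checks below enforce this before
 * calling into the driver.
 */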
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
907