/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * hermon_ci.c
 *    Hermon Channel Interface (CI) Routines
 *
 *    Implements all the routines necessary to interface with the IBTF.
 *    Pointers to all of these functions are passed to the IBTF at attach()
 *    time in the ibc_operations_t structure.  These functions include all
 *    of the necessary routines to implement the required InfiniBand "verbs"
 *    and additional IBTF-specific interfaces.
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/ib/adapters/hermon/hermon.h>

/* HCA and port related operations */
static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
    ibt_hca_portinfo_t *);
static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
    ibt_port_modify_flags_t, uint8_t);
static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);

/* Protection Domains */
static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
    ibc_pd_hdl_t *);
static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);

/* Reliable Datagram Domains */
static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
    ibc_rdd_hdl_t *);
static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);

/* Address Handles */
static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
    ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibc_pd_hdl_t *, ibt_adds_vect_t *);
static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibt_adds_vect_t *);

/* Queue Pairs */
static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
    ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
    ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
    ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
    ibt_chan_sizes_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
    ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
    ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_qp_query_attr_t *);
static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);

/* Completion Queues */
static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
    ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t *);
static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t, ibt_cq_handler_id_t);
static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
    ibt_cq_sched_flags_t, ibc_cq_handler_attr_t *);
static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibt_cq_handler_id_t);

/* EE Contexts */
static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
    ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_eec_query_attr_t *);
static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_cep_modify_flags_t, ibt_eec_info_t *);

/* Memory Registration */
static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
    ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibt_mr_query_attr_t *);
static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);

/* Memory Windows */
static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
    ibt_mw_query_attr_t *);

/* Multicast Groups */
static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);
static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);

/* Work Request and Completion Processing */
static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_send_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_wc_t *, uint_t, uint_t *);
static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_cq_notify_flags_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* CI Object Private Data */
static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* Shared Receive Queues */
static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
    ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
    ibt_srq_sizes_t *);
static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);

/* Address translation */
static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
    void *, uint_t, ibt_phys_buf_t *, uint_t *, size_t *, ib_memlen_t *,
    ibc_ma_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
    ibt_all_wr_t *, ibc_mi_hdl_t *);
static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);

/* Allocate L_Key */
static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);

/* Physical Register Memory Region */
static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_pmr_desc_t *);

/* Mellanox FMR */
static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
    ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
    ibc_fmr_pool_hdl_t *fmr_pool);
static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

/* Memory Allocation/Deallocation */
static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
    ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
    ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
    ibc_mem_alloc_hdl_t mem_alloc_hdl);

/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Hermon driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 */
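/*
 * Note that this is a positional initializer: the entries below take effect
 * by their order alone, so they must appear in exactly the order in which
 * the ibc_operations_t definition declares its function pointer members.
 */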
ibc_operations_t hermon_ibc_ops = {
	/* HCA and port related operations */
	hermon_ci_query_hca_ports,
	hermon_ci_modify_ports,
	hermon_ci_modify_system_image,

	/* Protection Domains */
	hermon_ci_alloc_pd,
	hermon_ci_free_pd,

	/* Reliable Datagram Domains */
	hermon_ci_alloc_rdd,
	hermon_ci_free_rdd,

	/* Address Handles */
	hermon_ci_alloc_ah,
	hermon_ci_free_ah,
	hermon_ci_query_ah,
	hermon_ci_modify_ah,

	/* Queue Pairs */
	hermon_ci_alloc_qp,
	hermon_ci_alloc_special_qp,
	hermon_ci_alloc_qp_range,
	hermon_ci_free_qp,
	hermon_ci_release_qpn,
	hermon_ci_query_qp,
	hermon_ci_modify_qp,

	/* Completion Queues */
	hermon_ci_alloc_cq,
	hermon_ci_free_cq,
	hermon_ci_query_cq,
	hermon_ci_resize_cq,
	hermon_ci_modify_cq,
	hermon_ci_alloc_cq_sched,
	hermon_ci_free_cq_sched,

	/* EE Contexts */
	hermon_ci_alloc_eec,
	hermon_ci_free_eec,
	hermon_ci_query_eec,
	hermon_ci_modify_eec,

	/* Memory Registration */
	hermon_ci_register_mr,
	hermon_ci_register_buf,
	hermon_ci_register_shared_mr,
	hermon_ci_deregister_mr,
	hermon_ci_query_mr,
	hermon_ci_reregister_mr,
	hermon_ci_reregister_buf,
	hermon_ci_sync_mr,

	/* Memory Windows */
	hermon_ci_alloc_mw,
	hermon_ci_free_mw,
	hermon_ci_query_mw,

	/* Multicast Groups */
	hermon_ci_attach_mcg,
	hermon_ci_detach_mcg,

	/* Work Request and Completion Processing */
	hermon_ci_post_send,
	hermon_ci_post_recv,
	hermon_ci_poll_cq,
	hermon_ci_notify_cq,

	/* CI Object Private Data */
	hermon_ci_ci_data_in,
	hermon_ci_ci_data_out,

	/* Shared Receive Queue */
	hermon_ci_alloc_srq,
	hermon_ci_free_srq,
	hermon_ci_query_srq,
	hermon_ci_modify_srq,
	hermon_ci_post_srq,

	/* Address translation */
	hermon_ci_map_mem_area,
	hermon_ci_unmap_mem_area,
	hermon_ci_map_mem_iov,
	hermon_ci_unmap_mem_iov,

	/* Allocate L_key */
	hermon_ci_alloc_lkey,

	/* Physical Register Memory Region */
	hermon_ci_register_physical_mr,
	hermon_ci_reregister_physical_mr,

	/* Mellanox FMR */
	hermon_ci_create_fmr_pool,
	hermon_ci_destroy_fmr_pool,
	hermon_ci_flush_fmr_pool,
	hermon_ci_register_physical_fmr,
	hermon_ci_deregister_fmr,

	/* Memory allocation */
	hermon_ci_alloc_io_mem,
	hermon_ci_free_io_mem,
};


/*
 * hermon_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
    ibt_hca_portinfo_t *info_p)
{
	hermon_state_t	*state;
	uint_t		start, end, port;
	int		status, indx;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/*
	 * If the specified port is zero, then we are supposed to query all
	 * ports.  Otherwise, we query only the port number specified.
	 * Setup the start and end port numbers as appropriate for the loop
	 * below.  Note:  The first Hermon port is port number one (1).
	 */
	if (query_port == 0) {
		start = 1;
		end = start + (state->hs_cfg_profile->cp_num_ports - 1);
	} else {
		end = start = query_port;
	}

	/* Query the port(s) */
	for (port = start, indx = 0; port <= end; port++, indx++) {
		status = hermon_port_query(state, port, &info_p[indx]);
		if (status != DDI_SUCCESS) {
			return (status);
		}
	}
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_modify_ports()
 *    Modify HCA port attributes
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
    ibt_port_modify_flags_t flags, uint8_t init_type)
{
	hermon_state_t	*state;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Modify the port(s) */
	status = hermon_port_modify(state, port, flags, init_type);
	return (status);
}

/*
 * hermon_ci_modify_system_image()
 *    Modify the System Image GUID
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support modification of the System
	 * Image GUID.  Hermon is only capable of modifying this parameter
	 * once (during driver initialization).
	 */
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
	hermon_state_t	*state;
	hermon_pdhdl_t	pdhdl;
	int		status;

	ASSERT(pd_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the PD */
	status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context
 */
static ibt_status_t
hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Free the PD */
	status = hermon_pd_free(state, &pdhdl);
	return (status);
}


/*
 * hermon_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the AH */
	status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon AH handle */
	*ah_p = (ibc_ah_hdl_t)ahhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Free the AH */
	status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_query_ah()
 *    Return the Address Vector information for a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
    ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	hermon_pdhdl_t	pdhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Query the AH */
	status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_modify_ah()
 *    Modify the Address Vector information of a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
	hermon_state_t	*state;
	hermon_ahhdl_t	ahhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and AH handle */
	state = (hermon_state_t *)hca;
	ahhdl = (hermon_ahhdl_t)ah;

	/* Modify the AH */
	status = hermon_ah_modify(state, ahhdl, attr_p);

	return (status);
}


/*
 * hermon_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	qpinfo.qpi_qpn		= qpn;
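	/* On success, hermon_qp_alloc() fills in qpinfo.qpi_qphdl below */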
	status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
	hermon_state_t		*state;
	hermon_qp_info_t	qpinfo;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the Special QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_port		= port;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	/* Return the Hermon QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	return (IBT_SUCCESS);
}

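/*
 * hermon_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs (not supported by this driver)
 */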
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * hermon_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Free the QP */
	status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
	    HERMON_NOSLEEP);

	return (status);
}


/*
 * hermon_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
{
	hermon_state_t		*state;
	hermon_qpn_entry_t	*entry;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qpnh == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	entry = (hermon_qpn_entry_t *)qpnh;

	/* Release the QP number */
	hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_query_qp()
 *    Query a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Query the QP */
	status = hermon_qp_query(state, qphdl, attr_p);
	return (status);
}


/*
 * hermon_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
	hermon_state_t	*state;
	hermon_qphdl_t	qphdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handle */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Modify the QP */
	status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
	return (status);
}


/*
 * hermon_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Allocate the CQ */
	status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
	    &cqhdl, HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}

	/* Return the Hermon CQ handle */
	*cq_p = (ibc_cq_hdl_t)cqhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	hermon_state_t	*state;
	hermon_cqhdl_t	cqhdl;
	int		status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Free the CQ */
	status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_cq()
 *    Return the size and interrupt moderation values of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
	hermon_cqhdl_t	cqhdl;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the CQ handle */
	cqhdl = (hermon_cqhdl_t)cq;

	/* Query the current CQ size */
	*entries_p = cqhdl->cq_bufsz;
	*count_p = cqhdl->cq_intmod_count;
	*usec_p = cqhdl->cq_intmod_usec;
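	/*
	 * No handler ID to return; Hermon does not support CQ scheduling
	 * classes (see hermon_ci_alloc_cq_sched())
	 */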
	*hid_p = 0;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
	hermon_state_t		*state;
	hermon_cqhdl_t		cqhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Resize the CQ */
	status = hermon_cq_resize(state, cqhdl, size, actual_size,
	    HERMON_NOSLEEP);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	return (IBT_SUCCESS);
}

/*
 * hermon_ci_modify_cq()
 *    Change the interrupt moderation values of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
    uint_t usec, ibt_cq_handler_id_t hid)
{
	hermon_state_t		*state;
	hermon_cqhdl_t		cqhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and CQ handle */
	state = (hermon_state_t *)hca;
	cqhdl = (hermon_cqhdl_t)cq;

	/* Modify the CQ */
	status = hermon_cq_modify(state, cqhdl, count, usec, hid,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_flags_t flags,
    ibc_cq_handler_attr_t *handler_attr_p)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Hermon driver.  Hermon
	 * does not support CQ scheduling classes.
	 */

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
	handler_attr_p->h_id = NULL;
	handler_attr_p->h_pri = 0;
	handler_attr_p->h_bind = NULL;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*handler_attr_p))
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_cq_sched()
 *    Free a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibt_cq_handler_id_t handler_id)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Hermon driver.  Hermon
	 * does not support CQ scheduling classes.  Returning a NULL
	 * hint is the way to treat this as unsupported.  We check for
	 * the expected NULL, but do not fail in any case.
	 */
	if (handler_id != NULL) {
		cmn_err(CE_NOTE, "hermon_ci_free_cq_sched: unexpected "
		    "non-NULL handler_id\n");
	}
	return (IBT_SUCCESS);
}


/*
 * hermon_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_modify_eec()
 *    Modify an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
{
	/*
	 * This is an unsupported interface for the Hermon driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Hermon does not support RD.
	 */
	return (IBT_NOT_SUPPORTED);
}


/*
 * hermon_ci_register_mr()
 *    Prepare a virtually addressed Memory Region for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
	    &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * flag that a sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->hs_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
	    &mrhdl, &op, HERMON_MPT_DMPT);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * flag that a sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_deregister_mr()
 *    Deregister a Memory Region from an HCA translation table
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	hermon_state_t		*state;
	hermon_mrhdl_t		mrhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/*
	 * Deregister the memory region.
	 */
	status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
	    HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mr()
 *    Retrieve information about a specified Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibt_mr_query_attr_t *mr_attr)
{
	hermon_state_t		*state;
	hermon_mrhdl_t		mrhdl;
	int			status;

	ASSERT(mr_attr != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for MemRegion handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and MR handle */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Query the memory region */
	status = hermon_mr_query(state, mrhdl, mr_attr);
	return (status);
}


/*
 * hermon_ci_register_shared_mr()
 *    Create a shared memory region matching an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
    ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Hermon softstate pointer and handles */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;
	mrhdl = (hermon_mrhdl_t)mr;

	/* Register the shared memory region */
	status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the shared region is mapped for streaming (i.e. noncoherent),
	 * then flag that a sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_mr()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
    ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * flag that a sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_reregister_buf()
 *    Modify the attributes of an existing Memory Region
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
{
	hermon_mr_options_t	op;
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mrhdl_t		mrhdl, mrhdl_new;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_new != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
	state = (hermon_state_t *)hca;
	mrhdl = (hermon_mrhdl_t)mr;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Reregister the memory region */
	op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
	status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
	    &mrhdl_new, &op);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl_new->mr_rkey;
	}

	/*
	 * If the region is mapped for streaming (i.e. noncoherent), then
	 * flag that a sync is required
	 */
	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Hermon MR handle */
	*mr_new = (ibc_mr_hdl_t)mrhdl_new;

	return (IBT_SUCCESS);
}

/*
 * hermon_ci_sync_mr()
 *    Synchronize access to a Memory Region
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
{
	hermon_state_t		*state;
	int			status;

	ASSERT(mr_segs != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer */
	state = (hermon_state_t *)hca;

	/* Sync the memory region */
	status = hermon_mr_sync(state, mr_segs, num_segs);
	return (status);
}


/*
 * hermon_ci_alloc_mw()
 *    Allocate a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
{
	hermon_state_t		*state;
	hermon_pdhdl_t		pdhdl;
	hermon_mwhdl_t		mwhdl;
	int			status;

	ASSERT(mw_p != NULL);
	ASSERT(rkey_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and PD handle */
	state = (hermon_state_t *)hca;
	pdhdl = (hermon_pdhdl_t)pd;

	/* Allocate the memory window */
	status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
	if (status != DDI_SUCCESS) {
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))

	/* Return the MW handle and RKey */
	*mw_p = (ibc_mw_hdl_t)mwhdl;
	*rkey_p = mwhdl->mr_rkey;

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_free_mw()
 *    Free a Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
{
	hermon_state_t		*state;
	hermon_mwhdl_t		mwhdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid MW handle */
	if (mw == NULL) {
		return (IBT_MW_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and MW handle */
	state = (hermon_state_t *)hca;
	mwhdl = (hermon_mwhdl_t)mw;

	/* Free the memory window */
	status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
	return (status);
}


/*
 * hermon_ci_query_mw()
 *    Return the attributes of the specified Memory Window
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
    ibt_mw_query_attr_t *mw_attr_p)
{
	hermon_mwhdl_t		mwhdl;

	ASSERT(mw_attr_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid MemWin handle */
	if (mw == NULL) {
		return (IBT_MW_HDL_INVALID);
	}

	/* Query the memory window pointer and fill in the return values */
	mwhdl = (hermon_mwhdl_t)mw;
	mutex_enter(&mwhdl->mr_lock);
	mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
	mutex_exit(&mwhdl->mr_lock);

	return (IBT_SUCCESS);
}


/*
 * hermon_ci_attach_mcg()
 *    Attach a Queue Pair to a Multicast Group
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
    ib_lid_t lid)
{
	hermon_state_t		*state;
	hermon_qphdl_t		qphdl;
	int			status;

	/* Check for valid HCA handle */
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Hermon softstate pointer and QP handles */
	state = (hermon_state_t *)hca;
	qphdl = (hermon_qphdl_t)qp;

	/* Attach the QP to the multicast group */
	status = hermon_mcg_attach(state, qphdl, gid, lid);
	return (status);
}


/*
 * hermon_ci_detach_mcg()
 *    Detach a Queue Pair from a Multicast Group
1833  *    Context: Can be called only from user or kernel context.
1834  */
1835 static ibt_status_t
1836 hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1837     ib_lid_t lid)
1838 {
1839 	hermon_state_t		*state;
1840 	hermon_qphdl_t		qphdl;
1841 	int			status;
1842 
1843 	/* Check for valid HCA handle */
1844 	if (hca == NULL) {
1845 		return (IBT_HCA_HDL_INVALID);
1846 	}
1847 
1848 	/* Check for valid QP handle pointer */
1849 	if (qp == NULL) {
1850 		return (IBT_QP_HDL_INVALID);
1851 	}
1852 
1853 	/* Grab the Hermon softstate pointer and QP handle */
1854 	state = (hermon_state_t *)hca;
1855 	qphdl = (hermon_qphdl_t)qp;
1856 
1857 	/* Detach the QP from the multicast group */
1858 	status = hermon_mcg_detach(state, qphdl, gid, lid);
1859 	return (status);
1860 }
1861 
1862 
1863 /*
1864  * hermon_ci_post_send()
1865  *    Post send work requests to the send queue on the specified QP
1866  *    Context: Can be called from interrupt or base context.
1867  */
1868 static ibt_status_t
1869 hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
1870     uint_t num_wr, uint_t *num_posted_p)
1871 {
1872 	hermon_state_t		*state;
1873 	hermon_qphdl_t		qphdl;
1874 	int			status;
1875 
1876 	ASSERT(wr_p != NULL);
1877 	ASSERT(num_wr != 0);
1878 
1879 	/* Check for valid HCA handle */
1880 	if (hca == NULL) {
1881 		return (IBT_HCA_HDL_INVALID);
1882 	}
1883 
1884 	/* Check for valid QP handle pointer */
1885 	if (qp == NULL) {
1886 		return (IBT_QP_HDL_INVALID);
1887 	}
1888 
1889 	/* Grab the Hermon softstate pointer and QP handle */
1890 	state = (hermon_state_t *)hca;
1891 	qphdl = (hermon_qphdl_t)qp;
1892 
1893 	/* Post the send WQEs */
1894 	status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
1895 	return (status);
1896 }
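/*
 * Illustrative sketch (an editor's addition, not part of the original
 * driver): one way a caller holding valid CI handles might post a
 * single signaled SEND through hermon_ci_post_send() above.  The
 * function name and the HERMON_CI_EXAMPLES guard are hypothetical; the
 * handles and buffer are assumed to come from earlier CI calls.
 */
#ifdef HERMON_CI_EXAMPLES
static ibt_status_t
hermon_ci_post_send_example(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ib_vaddr_t buf, ibt_lkey_t lkey, ib_msglen_t len)
{
	ibt_send_wr_t	wr;
	ibt_wr_ds_t	sgl;
	uint_t		posted;

	/* Describe the local buffer with a single scatter/gather entry */
	sgl.ds_va = buf;
	sgl.ds_key = lkey;
	sgl.ds_len = len;

	bzero(&wr, sizeof (wr));
	wr.wr_trans = IBT_RC_SRV;		/* assumes an RC channel */
	wr.wr_opcode = IBT_WRC_SEND;
	wr.wr_flags = IBT_WR_SEND_SIGNAL;	/* request a completion */
	wr.wr_sgl = &sgl;
	wr.wr_nds = 1;

	return (hermon_ci_post_send(hca, qp, &wr, 1, &posted));
}
#endif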
1897 
1898 
1899 /*
1900  * hermon_ci_post_recv()
1901  *    Post receive work requests to the receive queue on the specified QP
1902  *    Context: Can be called from interrupt or base context.
1903  */
1904 static ibt_status_t
1905 hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
1906     uint_t num_wr, uint_t *num_posted_p)
1907 {
1908 	hermon_state_t		*state;
1909 	hermon_qphdl_t		qphdl;
1910 	int			status;
1911 
1912 	ASSERT(wr_p != NULL);
1913 	ASSERT(num_wr != 0);
1914 
1915 	/* Check for valid HCA handle */
1916 	if (hca == NULL)
1917 		return (IBT_HCA_HDL_INVALID);
1918 
1919 	/* Check for valid QP handle pointer */
1920 	if (qp == NULL)
1921 		return (IBT_QP_HDL_INVALID);
1922 
1923 	/* Grab the Hermon softstate pointer and QP handle */
1924 	state = (hermon_state_t *)hca;
1925 	qphdl = (hermon_qphdl_t)qp;
1926 
1927 	/* Post the receive WQEs */
1928 	status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
1929 	return (status);
1930 }
1931 
1932 
1933 /*
1934  * hermon_ci_poll_cq()
1935  *    Poll for a work request completion
1936  *    Context: Can be called from interrupt or base context.
1937  */
1938 static ibt_status_t
1939 hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
1940     uint_t num_wc, uint_t *num_polled)
1941 {
1942 	hermon_state_t		*state;
1943 	hermon_cqhdl_t		cqhdl;
1944 	int			status;
1945 
1946 	ASSERT(wc_p != NULL);
1947 
1948 	/* Check for valid HCA handle */
1949 	if (hca == NULL) {
1950 		return (IBT_HCA_HDL_INVALID);
1951 	}
1952 
1953 	/* Check for valid CQ handle pointer */
1954 	if (cq == NULL) {
1955 		return (IBT_CQ_HDL_INVALID);
1956 	}
1957 
1958 	/* Check for valid num_wc field */
1959 	if (num_wc == 0) {
1960 		return (IBT_INVALID_PARAM);
1961 	}
1962 
1963 	/* Grab the Hermon softstate pointer and CQ handle */
1964 	state = (hermon_state_t *)hca;
1965 	cqhdl = (hermon_cqhdl_t)cq;
1966 
1967 	/* Poll for work request completions */
1968 	status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
1969 	return (status);
1970 }
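/*
 * Illustrative sketch (an editor's addition): drains a CQ in small
 * batches through hermon_ci_poll_cq() above.  hermon_cq_poll() returns
 * a non-success status (e.g. IBT_CQ_EMPTY) once no completions remain,
 * which ends the loop.  The HERMON_CI_EXAMPLES guard is hypothetical.
 */
#ifdef HERMON_CI_EXAMPLES
static void
hermon_ci_poll_cq_example(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	ibt_wc_t	wc[8];
	uint_t		polled, i;

	while (hermon_ci_poll_cq(hca, cq, wc, 8, &polled) == IBT_SUCCESS) {
		for (i = 0; i < polled; i++) {
			/* consume wc[i]: status, opcode, length, ... */
		}
	}
}
#endif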
1971 
1972 
1973 /*
1974  * hermon_ci_notify_cq()
1975  *    Enable notification events on the specified CQ
1976  *    Context: Can be called from interrupt or base context.
1977  */
1978 static ibt_status_t
1979 hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
1980     ibt_cq_notify_flags_t flags)
1981 {
1982 	hermon_state_t		*state;
1983 	hermon_cqhdl_t		cqhdl;
1984 	int			status;
1985 
1986 	/* Check for valid HCA handle */
1987 	if (hca == NULL) {
1988 		return (IBT_HCA_HDL_INVALID);
1989 	}
1990 
1991 	/* Check for valid CQ handle pointer */
1992 	if (cq_hdl == NULL) {
1993 		return (IBT_CQ_HDL_INVALID);
1994 	}
1995 
1996 	/* Grab the Hermon softstate pointer and CQ handle */
1997 	state = (hermon_state_t *)hca;
1998 	cqhdl = (hermon_cqhdl_t)cq_hdl;
1999 
2000 	/* Enable the CQ notification */
2001 	status = hermon_cq_notify(state, cqhdl, flags);
2002 	return (status);
2003 }
2004 
2005 /*
2006  * hermon_ci_ci_data_in()
2007  *    Exchange CI-specific data.
2008  *    Context: Can be called only from user or kernel context.
2009  */
2010 static ibt_status_t
2011 hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2012     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2013     size_t data_sz)
2014 {
2015 	hermon_state_t		*state;
2016 	int			status;
2017 
2018 	/* Check for valid HCA handle */
2019 	if (hca == NULL) {
2020 		return (IBT_HCA_HDL_INVALID);
2021 	}
2022 
2023 	/* Grab the Hermon softstate pointer */
2024 	state = (hermon_state_t *)hca;
2025 
2026 	/* Get the Hermon userland mapping information */
2027 	status = hermon_umap_ci_data_in(state, flags, object,
2028 	    ibc_object_handle, data_p, data_sz);
2029 	return (status);
2030 }
2031 
2032 /*
2033  * hermon_ci_ci_data_out()
2034  *    Exchange CI-specific data.
2035  *    Context: Can be called only from user or kernel context.
2036  */
2037 static ibt_status_t
2038 hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2039     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2040     size_t data_sz)
2041 {
2042 	hermon_state_t		*state;
2043 	int			status;
2044 
2045 	/* Check for valid HCA handle */
2046 	if (hca == NULL) {
2047 		return (IBT_HCA_HDL_INVALID);
2048 	}
2049 
2050 	/* Grab the Hermon softstate pointer */
2051 	state = (hermon_state_t *)hca;
2052 
2053 	/* Retrieve the data from the Hermon userland mapping interface */
2054 	status = hermon_umap_ci_data_out(state, flags, object,
2055 	    ibc_object_handle, data_p, data_sz);
2056 	return (status);
2057 }
2058 
2059 
2060 /*
2061  * hermon_ci_alloc_srq()
2062  *    Allocate a Shared Receive Queue (SRQ)
2063  *    Context: Can be called only from user or kernel context
2064  */
2065 static ibt_status_t
2066 hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2067     ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2068     ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2069 {
2070 	hermon_state_t		*state;
2071 	hermon_pdhdl_t		pdhdl;
2072 	hermon_srqhdl_t		srqhdl;
2073 	hermon_srq_info_t	srqinfo;
2074 	int			status;
2075 
2076 	/* Check for valid HCA handle */
2077 	if (hca == NULL) {
2078 		return (IBT_HCA_HDL_INVALID);
2079 	}
2080 
2081 	state = (hermon_state_t *)hca;
2082 
2083 	/* Check for valid PD handle pointer */
2084 	if (pd == NULL) {
2085 		return (IBT_PD_HDL_INVALID);
2086 	}
2087 
2088 	pdhdl = (hermon_pdhdl_t)pd;
2089 
2090 	srqinfo.srqi_ibt_srqhdl = ibt_srq;
2091 	srqinfo.srqi_pd		= pdhdl;
2092 	srqinfo.srqi_sizes	= sizes;
2093 	srqinfo.srqi_real_sizes	= ret_sizes_p;
2094 	srqinfo.srqi_srqhdl	= &srqhdl;
2095 	srqinfo.srqi_flags	= flags;
2096 
2097 	status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
2098 	if (status != DDI_SUCCESS) {
2099 		return (status);
2100 	}
2101 
2102 	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2103 
2104 	return (IBT_SUCCESS);
2105 }
2106 
2107 /*
2108  * hermon_ci_free_srq()
2109  *    Free a Shared Receive Queue (SRQ)
2110  *    Context: Can be called only from user or kernel context
2111  */
2112 static ibt_status_t
2113 hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2114 {
2115 	hermon_state_t	*state;
2116 	hermon_srqhdl_t	srqhdl;
2117 	int		status;
2118 
2119 	/* Check for valid HCA handle */
2120 	if (hca == NULL) {
2121 		return (IBT_HCA_HDL_INVALID);
2122 	}
2123 
2124 	state = (hermon_state_t *)hca;
2125 
2126 	/* Check for valid SRQ handle pointer */
2127 	if (srq == NULL) {
2128 		return (IBT_SRQ_HDL_INVALID);
2129 	}
2130 
2131 	srqhdl = (hermon_srqhdl_t)srq;
2132 
2133 	/* Free the SRQ */
2134 	status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
2135 	return (status);
2136 }
2137 
2138 /*
2139  * hermon_ci_query_srq()
2140  *    Query properties of a Shared Receive Queue (SRQ)
2141  *    Context: Can be called from interrupt or base context.
2142  */
2143 static ibt_status_t
2144 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2145     ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2146 {
2147 	hermon_srqhdl_t	srqhdl;
2148 
2149 	/* Check for valid HCA handle */
2150 	if (hca == NULL) {
2151 		return (IBT_HCA_HDL_INVALID);
2152 	}
2153 
2154 	/* Check for valid SRQ handle pointer */
2155 	if (srq == NULL) {
2156 		return (IBT_SRQ_HDL_INVALID);
2157 	}
2158 
2159 	srqhdl = (hermon_srqhdl_t)srq;
2160 
2161 	mutex_enter(&srqhdl->srq_lock);
2162 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2163 		mutex_exit(&srqhdl->srq_lock);
2164 		return (IBT_SRQ_ERROR_STATE);
2165 	}
2166 
2167 	*pd_p   = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2168 	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
2169 	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2170 	mutex_exit(&srqhdl->srq_lock);
2171 	*limit_p  = 0;
2172 
2173 	return (IBT_SUCCESS);
2174 }
2175 
2176 /*
2177  * hermon_ci_modify_srq()
2178  *    Modify properties of a Shared Receive Queue (SRQ)
2179  *    Context: Can be called from interrupt or base context.
2180  */
2181 /* ARGSUSED */
2182 static ibt_status_t
2183 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2184     ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2185 {
2186 	hermon_state_t	*state;
2187 	hermon_srqhdl_t	srqhdl;
2188 	uint_t		resize_supported, cur_srq_size;
2189 	int		status;
2190 
2191 	/* Check for valid HCA handle */
2192 	if (hca == NULL) {
2193 		return (IBT_HCA_HDL_INVALID);
2194 	}
2195 
2196 	state = (hermon_state_t *)hca;
2197 
2198 	/* Check for valid SRQ handle pointer */
2199 	if (srq == NULL) {
2200 		return (IBT_SRQ_HDL_INVALID);
2201 	}
2202 
2203 	srqhdl = (hermon_srqhdl_t)srq;
2204 
2205 	/*
2206 	 * Check Error State of SRQ.
2207 	 * Also, while we are holding the lock we save away the current SRQ
2208 	 * size for later use.
2209 	 */
2210 	mutex_enter(&srqhdl->srq_lock);
2211 	cur_srq_size = srqhdl->srq_wq_bufsz;
2212 	if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
2213 		mutex_exit(&srqhdl->srq_lock);
2214 		return (IBT_SRQ_ERROR_STATE);
2215 	}
2216 	mutex_exit(&srqhdl->srq_lock);
2217 
2218 	/*
2219 	 * Setting the limit watermark is not currently supported.  This is a
2220 	 * hermon hardware (firmware) limitation.  We return NOT_SUPPORTED here,
2221 	 * and have the limit code commented out for now.
2222 	 *
2223 	 * XXX If we enable the limit watermark support, we need to do checks
2224 	 * and set the 'srq->srq_wr_limit' here, instead of returning not
2225 	 * supported.  The 'hermon_srq_modify' operation below is for resizing
2226 	 * the SRQ only, the limit work should be done here.  If this is
2227 	 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2228 	 * function should also be removed at that time.
2229 	 */
2230 	if (flags & IBT_SRQ_SET_LIMIT) {
2231 		return (IBT_NOT_SUPPORTED);
2232 	}
2233 
2234 	/*
2235 	 * Check the SET_SIZE flag.  If not set, we simply return success here.
2236 	 * However if it is set, we check if resize is supported and only then
2237 	 * do we continue on with our resize processing.
2238 	 */
2239 	if (!(flags & IBT_SRQ_SET_SIZE)) {
2240 		return (IBT_SUCCESS);
2241 	}
2242 
2243 	resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
2244 	    IBT_HCA_RESIZE_SRQ;
2245 
2246 	if (!resize_supported) {
2247 		return (IBT_NOT_SUPPORTED);
2248 	}
2249 
2250 	/*
2251 	 * We do not support resizing an SRQ to be smaller than its current
2252 	 * size.  If a smaller (or equal) size is requested, then we simply
2253 	 * return success, and do nothing.
2254 	 */
2255 	if (size <= cur_srq_size) {
2256 		*ret_size_p = cur_srq_size;
2257 		return (IBT_SUCCESS);
2258 	}
2259 
2260 	status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
2261 	    HERMON_NOSLEEP);
2262 	if (status != DDI_SUCCESS) {
2263 		/* Set return value to current SRQ size */
2264 		*ret_size_p = cur_srq_size;
2265 		return (status);
2266 	}
2267 
2268 	return (IBT_SUCCESS);
2269 }
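/*
 * Illustrative sketch (an editor's addition): grows an SRQ through
 * hermon_ci_modify_srq() above.  Per the logic above, only
 * IBT_SRQ_SET_SIZE is honored, IBT_SRQ_SET_LIMIT is rejected, and a
 * shrink request succeeds without changing the queue.  The function
 * name and the HERMON_CI_EXAMPLES guard are hypothetical.
 */
#ifdef HERMON_CI_EXAMPLES
static ibt_status_t
hermon_ci_grow_srq_example(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
    uint_t new_size)
{
	uint_t	real_size;

	/* The 'limit' argument is unused by this HCA; pass zero */
	return (hermon_ci_modify_srq(hca, srq, IBT_SRQ_SET_SIZE,
	    new_size, 0, &real_size));
}
#endif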
2270 
2271 /*
2272  * hermon_ci_post_srq()
2273  *    Post a Work Request to the specified Shared Receive Queue (SRQ)
2274  *    Context: Can be called from interrupt or base context.
2275  */
2276 static ibt_status_t
2277 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2278     ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2279 {
2280 	hermon_state_t	*state;
2281 	hermon_srqhdl_t	srqhdl;
2282 	int		status;
2283 
2284 	/* Check for valid HCA handle */
2285 	if (hca == NULL) {
2286 		return (IBT_HCA_HDL_INVALID);
2287 	}
2288 
2289 	state = (hermon_state_t *)hca;
2290 
2291 	/* Check for valid SRQ handle pointer */
2292 	if (srq == NULL) {
2293 		return (IBT_SRQ_HDL_INVALID);
2294 	}
2295 
2296 	srqhdl = (hermon_srqhdl_t)srq;
2297 
2298 	status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2299 	return (status);
2300 }
2301 
2302 /* Address translation */
2303 /*
2304  * hermon_ci_map_mem_area()
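 *    Translate a virtual address range into a list of PAGESIZE-sized
 *    physical buffers suitable for programming into the HCA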
2305  *    Context: Can be called from interrupt or base context.
2306  */
2307 /* ARGSUSED */
2308 static ibt_status_t
2309 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2310     void *ibtl_reserved, uint_t list_len, ibt_phys_buf_t *paddr_list_p,
2311     uint_t *ret_num_paddr_p, size_t *paddr_buf_sz_p,
2312     ib_memlen_t *paddr_offset_p, ibc_ma_hdl_t *ibc_ma_hdl_p)
2313 {
2314 	hermon_state_t		*state;
2315 	uint_t			cookiecnt;
2316 	int			status;
2317 
2318 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*paddr_list_p))
2319 
2320 	/* Check for valid HCA handle */
2321 	if (hca == NULL) {
2322 		return (IBT_HCA_HDL_INVALID);
2323 	}
2324 
2325 	if ((va_attrs->va_flags & IBT_VA_BUF) && (va_attrs->va_buf == NULL)) {
2326 		return (IBT_INVALID_PARAM);
2327 	}
2328 
2329 	state = (hermon_state_t *)hca;
2330 
2331 	/*
2332 	 * Based on the length of the buffer and the paddr_list passed in,
2333 	 * retrieve DMA cookies for the virtual to physical address
2334 	 * translation.
2335 	 */
2336 	status = hermon_get_dma_cookies(state, paddr_list_p, va_attrs,
2337 	    list_len, &cookiecnt, ibc_ma_hdl_p);
2338 	if (status != DDI_SUCCESS) {
2339 		return (status);
2340 	}
2341 
2342 	/*
2343 	 * Split the cookies returned from hermon_get_dma_cookies() above.  We
2344 	 * also pass in the size of the cookies we would like.
2345 	 * Note: for now, we only support PAGESIZE cookies.
2346 	 */
2347 	status = hermon_split_dma_cookies(state, paddr_list_p, paddr_offset_p,
2348 	    list_len, &cookiecnt, PAGESIZE);
2349 	if (status != DDI_SUCCESS) {
2350 		return (status);
2351 	}
2352 
2353 	/* Set up the return values */
2354 	*ret_num_paddr_p = cookiecnt;
2355 	*paddr_buf_sz_p = PAGESIZE;
2356 
2357 	return (IBT_SUCCESS);
2358 }
2359 
2360 /*
2361  * hermon_ci_unmap_mem_area()
2362  *    Unmap a memory area previously mapped by hermon_ci_map_mem_area()
2363  *    Context: Can be called from interrupt or base context.
2364  */
2365 /* ARGSUSED */
2366 static ibt_status_t
2367 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2368 {
2369 	int			status = DDI_SUCCESS;
2370 
2371 	if (ma_hdl == NULL) {
2372 		return (IBT_MI_HDL_INVALID);
2373 	}
2374 
2375 	status = hermon_free_dma_cookies(ma_hdl);
2376 	if (status != DDI_SUCCESS) {
2377 		return (ibc_get_ci_failure(0));
2378 	}
2379 	return (IBT_SUCCESS);
2380 }
2381 
2382 struct ibc_mi_s {
2383 	int			imh_len;
2384 	ddi_dma_handle_t	imh_dmahandle[1];
2385 };
2386 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2387     ibc_mi_s::imh_len
2388     ibc_mi_s::imh_dmahandle))
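/*
 * Note: imh_dmahandle[1] is the traditional C89 trailing-array idiom.
 * Allocations below are sized as
 * sizeof (struct ibc_mi_s) + (N - 1) * sizeof (ddi_dma_handle_t),
 * leaving room for imh_len == N handles; the frees use the same
 * arithmetic.
 */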
2389 
2390 
2391 /*
2392  * hermon_ci_map_mem_iov()
2393  *    Map a buffer or iov list for DMA and fill in the work request SGL
2394  *    Context: Can be called from interrupt or base context.
2395  */
2396 /* ARGSUSED */
2397 static ibt_status_t
2398 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2399     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2400 {
2401 	int			status;
2402 	int			i, nds, max_nds;
2403 	uint_t			len;
2404 	ibt_status_t		ibt_status;
2405 	ddi_dma_handle_t	dmahdl;
2406 	ddi_dma_cookie_t	dmacookie;
2407 	ddi_dma_attr_t		dma_attr;
2408 	uint_t			cookie_cnt;
2409 	ibc_mi_hdl_t		mi_hdl;
2410 	ibt_lkey_t		rsvd_lkey;
2411 	ibt_wr_ds_t		*sgl;
2412 	hermon_state_t		*state;
2413 	int			kmflag;
2414 	int			(*callback)(caddr_t);
2415 
2416 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2417 
2418 	if (mi_hdl_p == NULL)
2419 		return (IBT_MI_HDL_INVALID);
2420 
2421 	/* Check for valid HCA handle */
2422 	if (hca == NULL)
2423 		return (IBT_HCA_HDL_INVALID);
2424 
2425 	state = (hermon_state_t *)hca;
2426 	hermon_dma_attr_init(state, &dma_attr);
2427 
2428 	nds = 0;
2429 	max_nds = iov_attr->iov_wr_nds;
2430 	if (iov_attr->iov_lso_hdr_sz)
2431 		max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2432 		    0xf) >> 4;	/* 0xf is for rounding up to a multiple of 16 */
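	/*
	 * Each send WQE segment is 16 bytes (hence the ">> 4").  The LSO
	 * header, plus the 4-byte header word that precedes inline data,
	 * is rounded up to whole 16-byte units, and the subtraction above
	 * reserves one SGL slot for each such unit.
	 */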
2433 	rsvd_lkey = state->hs_devlim.rsv_lkey;
2434 	if (iov_attr->iov_flags & IBT_IOV_NOSLEEP) {
2435 		kmflag = KM_NOSLEEP;
2436 		callback = DDI_DMA_DONTWAIT;
2437 	} else {
2438 		kmflag = KM_SLEEP;
2439 		callback = DDI_DMA_SLEEP;
2440 	}
2441 
2442 	if (iov_attr->iov_flags & IBT_IOV_BUF) {
2443 		mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2444 		if (mi_hdl == NULL)
2445 			return (IBT_INSUFF_RESOURCE);
2446 		sgl = wr->send.wr_sgl;
2447 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2448 
2449 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2450 		    callback, NULL, &dmahdl);
2451 		if (status != DDI_SUCCESS) {
2452 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2453 			return (IBT_INSUFF_RESOURCE);
2454 		}
2455 		status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2456 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2457 		    &dmacookie, &cookie_cnt);
2458 		if (status != DDI_DMA_MAPPED) {
2459 			ddi_dma_free_handle(&dmahdl);
2460 			kmem_free(mi_hdl, sizeof (*mi_hdl));
2461 			return (ibc_get_ci_failure(0));
2462 		}
2463 		while (cookie_cnt-- > 0) {
2464 			if (nds >= max_nds) {
2465 				status = ddi_dma_unbind_handle(dmahdl);
2466 				if (status != DDI_SUCCESS)
2467 					HERMON_WARNING(state, "failed to "
2468 					    "unbind DMA mapping");
2469 				ddi_dma_free_handle(&dmahdl);
				kmem_free(mi_hdl, sizeof (*mi_hdl));
2470 				return (IBT_SGL_TOO_SMALL);
2471 			}
2472 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2473 			sgl[nds].ds_key = rsvd_lkey;
2474 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2475 			nds++;
2476 			if (cookie_cnt != 0)
2477 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2478 		}
2479 		wr->send.wr_nds = nds;
2480 		mi_hdl->imh_len = 1;
2481 		mi_hdl->imh_dmahandle[0] = dmahdl;
2482 		*mi_hdl_p = mi_hdl;
2483 		return (IBT_SUCCESS);
2484 	}
2485 
2486 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2487 		sgl = wr->recv.wr_sgl;
2488 	else
2489 		sgl = wr->send.wr_sgl;
2490 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2491 
2492 	len = iov_attr->iov_list_len;
2493 	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2494 	    (len - 1) * sizeof (ddi_dma_handle_t), kmflag);
2495 	if (mi_hdl == NULL)
2496 		return (IBT_INSUFF_RESOURCE);
2497 	for (i = 0; i < len; i++) {
2498 		status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2499 		    callback, NULL, &dmahdl);
2500 		if (status != DDI_SUCCESS) {
2501 			ibt_status = IBT_INSUFF_RESOURCE;
2502 			goto fail2;
2503 		}
2504 		status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2505 		    iov_attr->iov[i].iov_addr, iov_attr->iov[i].iov_len,
2506 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2507 		    &dmacookie, &cookie_cnt);
2508 		if (status != DDI_DMA_MAPPED) {
2509 			ibt_status = ibc_get_ci_failure(0);
2510 			goto fail1;
2511 		}
2512 		if (nds + cookie_cnt > max_nds) {
2513 			ibt_status = IBT_SGL_TOO_SMALL;
			(void) ddi_dma_unbind_handle(dmahdl);
2514 			goto fail1;
2515 		}
2516 		while (cookie_cnt-- > 0) {
2517 			sgl[nds].ds_va = dmacookie.dmac_laddress;
2518 			sgl[nds].ds_key = rsvd_lkey;
2519 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2520 			nds++;
2521 			if (cookie_cnt != 0)
2522 				ddi_dma_nextcookie(dmahdl, &dmacookie);
2523 		}
2524 		mi_hdl->imh_dmahandle[i] = dmahdl;
2525 	}
2526 
2527 	if (iov_attr->iov_flags & IBT_IOV_RECV)
2528 		wr->recv.wr_nds = nds;
2529 	else
2530 		wr->send.wr_nds = nds;
2531 	mi_hdl->imh_len = len;
2532 	*mi_hdl_p = mi_hdl;
2533 	return (IBT_SUCCESS);
2534 
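/*
 * Error unwind: fail1 frees the DMA handle for the current iov entry
 * (never bound, or already unbound), then falls into fail2, which
 * unbinds and frees the handles bound by earlier iterations before
 * releasing mi_hdl itself.
 */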
2535 fail1:
2536 	ddi_dma_free_handle(&dmahdl);
2537 fail2:
2538 	while (--i >= 0) {
2539 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2540 		if (status != DDI_SUCCESS)
2541 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2542 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2543 	}
2544 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2545 	    (len - 1) * sizeof (ddi_dma_handle_t));
2546 	*mi_hdl_p = NULL;
2547 	return (ibt_status);
2548 }
2549 
2550 /*
2551  * hermon_ci_unmap_mem_iov()
2552  *    Unmap memory previously mapped by hermon_ci_map_mem_iov()
2553  *    Context: Can be called from interrupt or base context.
2554  */
2555 /* ARGSUSED */
2556 static ibt_status_t
2557 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2558 {
2559 	int		status, i;
2560 	hermon_state_t	*state;
2561 
2562 	/* Check for valid HCA handle */
2563 	if (hca == NULL)
2564 		return (IBT_HCA_HDL_INVALID);
2565 
2566 	state = (hermon_state_t *)hca;
2567 
2568 	if (mi_hdl == NULL)
2569 		return (IBT_MI_HDL_INVALID);
2570 
2571 	for (i = 0; i < mi_hdl->imh_len; i++) {
2572 		status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2573 		if (status != DDI_SUCCESS)
2574 			HERMON_WARNING(state, "failed to unbind DMA mapping");
2575 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2576 	}
2577 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2578 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2579 	return (IBT_SUCCESS);
2580 }
2581 
2582 /* Allocate L_Key */
2583 /*
2584  * hermon_ci_alloc_lkey()
2585  */
2586 /* ARGSUSED */
2587 static ibt_status_t
2588 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2589     ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p,
2590     ibt_pmr_desc_t *mem_desc_p)
2591 {
2592 	return (IBT_NOT_SUPPORTED);
2593 }
2594 
2595 /* Physical Register Memory Region */
2596 /*
2597  * hermon_ci_register_physical_mr()
2598  */
2599 /* ARGSUSED */
2600 static ibt_status_t
2601 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2602     ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2603     ibt_pmr_desc_t *mem_desc_p)
2604 {
2605 	return (IBT_NOT_SUPPORTED);
2606 }
2607 
2608 /*
2609  * hermon_ci_reregister_physical_mr()
2610  */
2611 /* ARGSUSED */
2612 static ibt_status_t
2613 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2614     ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2615     ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2616 {
2617 	return (IBT_NOT_SUPPORTED);
2618 }
2619 
2620 /* Mellanox FMR Support */
2621 /*
2622  * hermon_ci_create_fmr_pool()
2623  * Creates a pool of memory regions suitable for FMR registration
2624  *    Context: Can be called from base context only
2625  */
2626 static ibt_status_t
2627 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2628     ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2629 {
2630 	hermon_state_t	*state;
2631 	hermon_pdhdl_t	pdhdl;
2632 	hermon_fmrhdl_t	fmrpoolhdl;
2633 	int		status;
2634 
2635 	/* Check for valid HCA handle */
2636 	if (hca == NULL) {
2637 		return (IBT_HCA_HDL_INVALID);
2638 	}
2639 
2640 	state = (hermon_state_t *)hca;
2641 
2642 	/* Check for valid PD handle pointer */
2643 	if (pd == NULL) {
2644 		return (IBT_PD_HDL_INVALID);
2645 	}
2646 
2647 	pdhdl = (hermon_pdhdl_t)pd;
2648 
2649 	/*
2650 	 * Validate the access flags.  Both Remote Write and Remote Atomic
2651 	 * require the Local Write flag to be set
2652 	 */
2653 	if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2654 	    (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2655 	    !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2656 		return (IBT_MR_ACCESS_REQ_INVALID);
2657 	}
2658 
2659 	status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2660 	if (status != DDI_SUCCESS) {
2661 		return (status);
2662 	}
2663 
2664 	/* Return the FMR Pool handle */
2665 	*fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2666 
2667 	return (IBT_SUCCESS);
2668 }
2669 
2670 /*
2671  * hermon_ci_destroy_fmr_pool()
2672  * Free all resources associated with an FMR pool.
2673  *    Context: Can be called from base context only.
2674  */
2675 static ibt_status_t
2676 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2677 {
2678 	hermon_state_t	*state;
2679 	hermon_fmrhdl_t	fmrpoolhdl;
2680 	int		status;
2681 
2682 	/* Check for valid HCA handle */
2683 	if (hca == NULL) {
2684 		return (IBT_HCA_HDL_INVALID);
2685 	}
2686 
2687 	state = (hermon_state_t *)hca;
2688 
2689 	/* Check for valid FMR Pool handle */
2690 	if (fmr_pool == NULL) {
2691 		return (IBT_FMR_POOL_HDL_INVALID);
2692 	}
2693 
2694 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2695 
2696 	status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
2697 	return (status);
2698 }
2699 
2700 /*
2701  * hermon_ci_flush_fmr_pool()
2702  * Force a flush of the memory tables, cleaning up used FMR resources.
2703  *    Context: Can be called from interrupt or base context.
2704  */
2705 static ibt_status_t
2706 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2707 {
2708 	hermon_state_t	*state;
2709 	hermon_fmrhdl_t	fmrpoolhdl;
2710 	int		status;
2711 
2712 	/* Check for valid HCA handle */
2713 	if (hca == NULL) {
2714 		return (IBT_HCA_HDL_INVALID);
2715 	}
2716 
2717 	state = (hermon_state_t *)hca;
2718 
2719 	/* Check for valid FMR Pool handle */
2720 	if (fmr_pool == NULL) {
2721 		return (IBT_FMR_POOL_HDL_INVALID);
2722 	}
2723 
2724 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2725 
2726 	status = hermon_flush_fmr_pool(state, fmrpoolhdl);
2727 	return (status);
2728 }
2729 
2730 /*
2731  * hermon_ci_register_physical_fmr()
2732  * From the 'pool' of FMR regions passed in, performs register physical
2733  * operation.
2734  *    Context: Can be called from interrupt or base context.
2735  */
2736 /* ARGSUSED */
2737 static ibt_status_t
2738 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
2739     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
2740     void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
2741 {
2742 	hermon_state_t		*state;
2743 	hermon_mrhdl_t		mrhdl;
2744 	hermon_fmrhdl_t		fmrpoolhdl;
2745 	int			status;
2746 
2747 	ASSERT(mem_pattr != NULL);
2748 	ASSERT(mr_p != NULL);
2749 	ASSERT(mem_desc_p != NULL);
2750 
2751 	/* Check for valid HCA handle */
2752 	if (hca == NULL) {
2753 		return (IBT_HCA_HDL_INVALID);
2754 	}
2755 
2756 	/* Grab the Hermon softstate pointer */
2757 	state = (hermon_state_t *)hca;
2758 
2759 	/* Check for valid FMR Pool handle */
2760 	if (fmr_pool == NULL) {
2761 		return (IBT_FMR_POOL_HDL_INVALID);
2762 	}
2763 
2764 	fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2765 
2766 	status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2767 	    &mrhdl, mem_desc_p);
2768 	if (status != DDI_SUCCESS) {
2769 		return (status);
2770 	}
2771 
2772 	/*
2773 	 * If the region is mapped for streaming (i.e. noncoherent), then
2774 	 * note that a sync is required
2775 	 */
2776 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2777 	mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2778 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2779 	if (mem_desc_p->pmd_sync_required == B_TRUE) {
2780 		/* Fill in DMA handle for future sync operations */
2781 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
2782 		mrhdl->mr_bindinfo.bi_dmahdl =
2783 		    (ddi_dma_handle_t)mem_pattr->pmr_ma;
2784 	}
2785 
2786 	/* Return the Hermon MR handle */
2787 	*mr_p = (ibc_mr_hdl_t)mrhdl;
2788 
2789 	return (IBT_SUCCESS);
2790 }
2791 
2792 /*
2793  * hermon_ci_deregister_fmr()
2794  * Moves an FMR (specified by 'mr') to the deregistered state.
2795  *    Context: Can be called from base context only.
2796  */
2797 static ibt_status_t
2798 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2799 {
2800 	hermon_state_t		*state;
2801 	hermon_mrhdl_t		mrhdl;
2802 	int			status;
2803 
2804 	/* Check for valid HCA handle */
2805 	if (hca == NULL) {
2806 		return (IBT_HCA_HDL_INVALID);
2807 	}
2808 
2809 	/* Check for valid memory region handle */
2810 	if (mr == NULL) {
2811 		return (IBT_MR_HDL_INVALID);
2812 	}
2813 
2814 	/* Grab the Hermon softstate pointer */
2815 	state = (hermon_state_t *)hca;
2816 	mrhdl = (hermon_mrhdl_t)mr;
2817 
2818 	/*
2819 	 * Deregister the memory region: either "unmap" the FMR or deregister
2820 	 * the normal memory region.
2821 	 */
2822 	status = hermon_deregister_fmr(state, mrhdl);
2823 	return (status);
2824 }
2825 
2826 static int
2827 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
2828     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
2829 {
2830 	ddi_dma_handle_t	dma_hdl;
2831 	ddi_dma_attr_t		dma_attr;
2832 	ddi_acc_handle_t	acc_hdl;
2833 	size_t			real_len;
2834 	int			status;
2835 	int			(*ddi_cb)(caddr_t);
2836 	ibc_mem_alloc_hdl_t	mem_alloc_hdl;
2837 
2838 	hermon_dma_attr_init(state, &dma_attr);
2839 
2840 	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
2841 
2842 	/* Allocate a DMA handle */
2843 	status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
2844 	    NULL, &dma_hdl);
2845 	if (status != DDI_SUCCESS) {
2846 		return (DDI_FAILURE);
2847 	}
2848 
2849 	/* Allocate DMA memory */
2850 	status = ddi_dma_mem_alloc(dma_hdl, size,
2851 	    &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
2852 	    NULL, kaddrp, &real_len, &acc_hdl);
2853 	if (status != DDI_SUCCESS) {
2854 		ddi_dma_free_handle(&dma_hdl);
2855 		return (DDI_FAILURE);
2856 	}
2857 
2858 	/* Package the DMA and access handles and return */
2859 	mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
2860 	    (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
2861 	if (mem_alloc_hdl == NULL) {
2862 		ddi_dma_mem_free(&acc_hdl);
2863 		ddi_dma_free_handle(&dma_hdl);
2864 		return (DDI_FAILURE);
2865 	}
2866 	mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
2867 	mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
2868 
2869 	*mem_hdl = mem_alloc_hdl;
2870 
2871 	return (DDI_SUCCESS);
2872 }
2873 
2874 /*
2875  * hermon_ci_alloc_io_mem()
2876  *	Allocate DMA-able memory
2877  *
2878  */
2879 static ibt_status_t
2880 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
2881     caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
2882 {
2883 	hermon_state_t	*state;
2884 	int		status;
2885 
2886 	/* Check for valid HCA handle */
2887 	if (hca == NULL) {
2888 		return (IBT_HCA_HDL_INVALID);
2889 	}
2890 
2891 	/* Check for valid mem_alloc_hdl_p handle pointer */
2892 	if (mem_alloc_hdl_p == NULL) {
2893 		return (IBT_MEM_ALLOC_HDL_INVALID);
2894 	}
2895 
2896 	/* Grab the Hermon softstate pointer */
2897 	state = (hermon_state_t *)hca;
2898 
2899 	/* Allocate the memory and handles */
2900 	status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
2901 	    mem_alloc_hdl_p);
2902 
2903 	if (status != DDI_SUCCESS) {
2904 		*mem_alloc_hdl_p = NULL;
2905 		*kaddrp = NULL;
2906 		return (status);
2907 	}
2908 
2909 	return (IBT_SUCCESS);
2910 }
2911 
2912 
2913 /*
2914  * hermon_ci_free_io_mem()
2915  *	Unbind the DMA handle and free the memory
2916  */
2917 static ibt_status_t
2918 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
2919 {
2920 	/* Check for valid HCA handle */
2921 	if (hca == NULL) {
2922 		return (IBT_HCA_HDL_INVALID);
2923 	}
2924 
2925 	/* Check for valid mem_alloc_hdl handle pointer */
2926 	if (mem_alloc_hdl == NULL) {
2927 		return (IBT_MEM_ALLOC_HDL_INVALID);
2928 	}
2929 
2930 	/* Unbind the handles and free the memory */
2931 	(void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
2932 	ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
2933 	ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
2934 	kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
2935 
2936 	return (IBT_SUCCESS);
2937 }
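/*
 * Illustrative sketch (an editor's addition): allocates and releases a
 * DMA-able buffer through the two entry points above.  In the driver
 * proper the buffer would be bound and registered before use; here the
 * unbind inside hermon_ci_free_io_mem() fails harmlessly on the
 * never-bound handle and is ignored.  The function name and the
 * HERMON_CI_EXAMPLES guard are hypothetical.
 */
#ifdef HERMON_CI_EXAMPLES
static void
hermon_ci_io_mem_example(ibc_hca_hdl_t hca)
{
	caddr_t			kaddr;
	ibc_mem_alloc_hdl_t	mem_hdl;

	if (hermon_ci_alloc_io_mem(hca, PAGESIZE, IBT_MR_NOSLEEP,
	    &kaddr, &mem_hdl) != IBT_SUCCESS)
		return;

	/* ... fill kaddr with data to be DMA'd ... */

	(void) hermon_ci_free_io_mem(hca, mem_hdl);
}
#endif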
2938