/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * tavor_ci.c
 *    Tavor Channel Interface (CI) Routines
 *
 *    Implements all the routines necessary to interface with the IBTF.
 *    Pointers to all of these functions are passed to the IBTF at attach()
 *    time in the ibc_operations_t structure.  These functions include all
 *    of the necessary routines to implement the required InfiniBand "verbs"
 *    and additional IBTF-specific interfaces.
 */

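/*
 * Registration sketch (illustrative only; the field name and argument
 * list below are assumptions -- see the driver's attach() path for the
 * authoritative sequence).  At attach time the driver describes itself
 * to the IBTF with an ibc_hca_info_t whose operations pointer refers to
 * the tavor_ibc_ops table defined in this file:
 *
 *	hca_info.hca_ops = &tavor_ibc_ops;
 *	status = ibc_attach(&clnt_hdl, &hca_info);
 *
 * Thereafter the IBTF dispatches all CI calls through that table.
 */
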
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

#include <sys/ib/adapters/tavor/tavor.h>

/* HCA and port related operations */
static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
    ibt_hca_portinfo_t *);
static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
    ibt_port_modify_flags_t, uint8_t);
static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);

/* Protection Domains */
static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
    ibc_pd_hdl_t *);
static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);

/* Reliable Datagram Domains */
static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
    ibc_rdd_hdl_t *);
static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);

/* Address Handles */
static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
    ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibc_pd_hdl_t *, ibt_adds_vect_t *);
static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
    ibt_adds_vect_t *);

/* Queue Pairs */
static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
    ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
    ibc_qp_hdl_t *);
static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
    ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
    ibt_chan_sizes_t *, ibc_qp_hdl_t *);
static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
    ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
    ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_qp_query_attr_t *);
static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);

/* Completion Queues */
static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
    ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *,
    uint_t *, uint_t *, ibt_cq_handler_id_t *);
static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t *);
static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    uint_t, uint_t, ibt_cq_handler_id_t);
static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t,
    ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);

/* EE Contexts */
static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
    ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_eec_query_attr_t *);
static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
    ibt_cep_modify_flags_t, ibt_eec_info_t *);

/* Memory Registration */
static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
    ibc_mr_hdl_t *, ibt_mr_desc_t *);
static ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibt_mr_query_attr_t *);
static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
    ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
    ibt_mr_desc_t *);
static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
static ibt_status_t tavor_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);

/* Memory Windows */
static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
    ibt_mw_query_attr_t *);

/* Multicast Groups */
static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);
static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ib_gid_t, ib_lid_t);

/* Work Request and Completion Processing */
static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_send_wr_t *, uint_t, uint_t *);
static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);
static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_wc_t *, uint_t, uint_t *);
static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
    ibt_cq_notify_flags_t);

/* CI Object Private Data */
static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);
static ibt_status_t tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
    ibt_object_type_t, void *, void *, size_t);

/* Shared Receive Queues */
static ibt_status_t tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
    ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
    ibt_srq_sizes_t *);
static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
    ibt_recv_wr_t *, uint_t, uint_t *);

/* Address translation */
static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
    void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
    ibt_all_wr_t *, ibc_mi_hdl_t *);
static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);

/* Allocate L_Key */
static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);

/* Physical Register Memory Region */
static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
    ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t,
    ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
    ibt_pmr_desc_t *);

/* Mellanox FMR */
static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool);
static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool);
static ibt_status_t tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
    ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
    void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca,
    ibc_mr_hdl_t mr);

static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t,
    ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *);
static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t);
static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t,
    caddr_t *, tavor_mem_alloc_hdl_t *);

static ibt_status_t tavor_ci_not_supported();

/*
 * This ibc_operations_t structure includes pointers to all the entry points
 * provided by the Tavor driver.  This structure is passed to the IBTF at
 * driver attach time, using the ibc_attach() call.
 */
ibc_operations_t tavor_ibc_ops = {
	/* HCA and port related operations */
	tavor_ci_query_hca_ports,
	tavor_ci_modify_ports,
	tavor_ci_modify_system_image,

	/* Protection Domains */
	tavor_ci_alloc_pd,
	tavor_ci_free_pd,

	/* Reliable Datagram Domains */
	tavor_ci_alloc_rdd,
	tavor_ci_free_rdd,

	/* Address Handles */
	tavor_ci_alloc_ah,
	tavor_ci_free_ah,
	tavor_ci_query_ah,
	tavor_ci_modify_ah,

	/* Queue Pairs */
	tavor_ci_alloc_qp,
	tavor_ci_alloc_special_qp,
	tavor_ci_alloc_qp_range,
	tavor_ci_free_qp,
	tavor_ci_release_qpn,
	tavor_ci_query_qp,
	tavor_ci_modify_qp,

	/* Completion Queues */
	tavor_ci_alloc_cq,
	tavor_ci_free_cq,
	tavor_ci_query_cq,
	tavor_ci_resize_cq,
	tavor_ci_modify_cq,
	tavor_ci_alloc_cq_sched,
	tavor_ci_free_cq_sched,
	tavor_ci_not_supported,	/* query_cq_handler_id */

	/* EE Contexts */
	tavor_ci_alloc_eec,
	tavor_ci_free_eec,
	tavor_ci_query_eec,
	tavor_ci_modify_eec,

	/* Memory Registration */
	tavor_ci_register_mr,
	tavor_ci_register_buf,
	tavor_ci_register_shared_mr,
	tavor_ci_deregister_mr,
	tavor_ci_query_mr,
	tavor_ci_reregister_mr,
	tavor_ci_reregister_buf,
	tavor_ci_sync_mr,

	/* Memory Windows */
	tavor_ci_alloc_mw,
	tavor_ci_free_mw,
	tavor_ci_query_mw,

	/* Multicast Groups */
	tavor_ci_attach_mcg,
	tavor_ci_detach_mcg,

	/* Work Request and Completion Processing */
	tavor_ci_post_send,
	tavor_ci_post_recv,
	tavor_ci_poll_cq,
	tavor_ci_notify_cq,

	/* CI Object Private Data */
	tavor_ci_ci_data_in,
	tavor_ci_ci_data_out,

	/* Shared Receive Queues */
	tavor_ci_alloc_srq,
	tavor_ci_free_srq,
	tavor_ci_query_srq,
	tavor_ci_modify_srq,
	tavor_ci_post_srq,

	/* Address translation */
	tavor_ci_map_mem_area,
	tavor_ci_unmap_mem_area,
	tavor_ci_map_mem_iov,
	tavor_ci_unmap_mem_iov,

	/* Allocate L_Key */
	tavor_ci_alloc_lkey,

	/* Physical Register Memory Region */
	tavor_ci_register_physical_mr,
	tavor_ci_reregister_physical_mr,

	/* Mellanox FMR */
	tavor_ci_create_fmr_pool,
	tavor_ci_destroy_fmr_pool,
	tavor_ci_flush_fmr_pool,
	tavor_ci_register_physical_fmr,
	tavor_ci_deregister_fmr,

	/* DMA-able memory */
	tavor_ci_alloc_io_mem,
	tavor_ci_free_io_mem,

	/* XRC not yet supported */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_domain */
	tavor_ci_not_supported,	/* ibc_free_xrc_domain */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_srq */
	tavor_ci_not_supported,	/* ibc_free_xrc_srq */
	tavor_ci_not_supported,	/* ibc_query_xrc_srq */
	tavor_ci_not_supported,	/* ibc_modify_xrc_srq */
	tavor_ci_not_supported,	/* ibc_alloc_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_free_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_query_xrc_tgt_qp */
	tavor_ci_not_supported,	/* ibc_modify_xrc_tgt_qp */

	/* Memory Region (physical) */
	tavor_ci_register_dma_mr,

	/* Next enhancements */
	tavor_ci_not_supported,	/* ibc_enhancement1 */
	tavor_ci_not_supported,	/* ibc_enhancement2 */
	tavor_ci_not_supported,	/* ibc_enhancement3 */
	tavor_ci_not_supported,	/* ibc_enhancement4 */
};

/*
 * tavor_ci_not_supported()
 *    Stub for the not-yet-implemented verbs entry points; it simply
 *    returns IBT_NOT_SUPPORTED.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_not_supported()
{
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_query_hca_ports()
 *    Returns HCA port attributes for either one or all of the HCA's ports.
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
    ibt_hca_portinfo_t *info_p)
{
	tavor_state_t	*state;
	uint_t		start, end, port;
	int		status, indx;

	TAVOR_TNF_ENTER(tavor_ci_query_hca_ports);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_hca_ports_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/*
	 * If the specified port is zero, then we are supposed to query all
	 * ports.  Otherwise, we query only the port number specified.
	 * Setup the start and end port numbers as appropriate for the loop
	 * below.  Note:  The first Tavor port is port number one (1).
	 */
	if (query_port == 0) {
		start = 1;
		end = start + (state->ts_cfg_profile->cp_num_ports - 1);
	} else {
		end = start = query_port;
	}
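
	/*
	 * For example, a two-port HCA queried with query_port == 0 yields
	 * start == 1 and end == 2, so the loop below fills info_p[0] and
	 * info_p[1].
	 */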

	/* Query the port(s) */
	for (port = start, indx = 0; port <= end; port++, indx++) {
		status = tavor_port_query(state, port, &info_p[indx]);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_1(tavor_port_query_fail, TAVOR_TNF_ERROR,
			    "", tnf_uint, status, status);
			TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
			return (status);
		}
	}

	TAVOR_TNF_EXIT(tavor_ci_query_hca_ports);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_modify_ports()
 *    Modify HCA port attributes
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
    ibt_port_modify_flags_t flags, uint8_t init_type)
{
	tavor_state_t	*state;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_modify_ports);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_modify_ports_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_ports);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Modify the port(s) */
	status = tavor_port_modify(state, port, flags, init_type);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_modify_ports_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_modify_ports);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_modify_ports);
	return (IBT_SUCCESS);
}

/*
 * tavor_ci_modify_system_image()
 *    Modify the System Image GUID
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
{
	TAVOR_TNF_ENTER(tavor_ci_modify_system_image);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support modification of the System
	 * Image GUID.  Tavor is only capable of modifying this parameter
	 * once (during driver initialization).
	 */

	TAVOR_TNF_EXIT(tavor_ci_modify_system_image);
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_pd()
 *    Allocate a Protection Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
{
	tavor_state_t	*state;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_pd);

	ASSERT(pd_p != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_pd_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Allocate the PD */
	status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_pd_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
		return (status);
	}

	/* Return the Tavor PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_pd);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_pd()
 *    Free a Protection Domain
 *    Context: Can be called only from user or kernel context
 */
static ibt_status_t
tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
{
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_free_pd);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_pd_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_free_pd_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Free the PD */
	status = tavor_pd_free(state, &pdhdl);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_pd_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_pd);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_pd);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_rdd()
 *    Allocate a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
    ibc_rdd_hdl_t *rdd_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_rdd);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_alloc_rdd);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_free_rdd()
 *    Free a Reliable Datagram Domain
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
{
	TAVOR_TNF_ENTER(tavor_ci_free_rdd);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_free_rdd);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_alloc_ah()
 *    Allocate an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
    ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_ah_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (IBT_PD_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Allocate the AH */
	status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
		return (status);
	}

	/* Return the Tavor AH handle */
	*ah_p = (ibc_ah_hdl_t)ahhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_ah()
 *    Free an Address Handle
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		TNF_PROBE_0(tavor_ci_free_ah_invahhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_ah);
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and AH handle */
	state = (tavor_state_t *)hca;
	ahhdl = (tavor_ahhdl_t)ah;

	/* Free the AH */
	status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_ah);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_ah()
 *    Return the Address Vector information for a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
    ibt_adds_vect_t *attr_p)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	tavor_pdhdl_t	pdhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_query_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		TNF_PROBE_0(tavor_ci_query_ah_invahhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_ah);
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and AH handle */
	state = (tavor_state_t *)hca;
	ahhdl = (tavor_ahhdl_t)ah;

	/* Query the AH */
	status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_query_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_query_ah);
		return (status);
	}

	/* Return the Tavor PD handle */
	*pd_p = (ibc_pd_hdl_t)pdhdl;

	TAVOR_TNF_EXIT(tavor_ci_query_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_modify_ah()
 *    Modify the Address Vector information of a specified Address Handle
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
{
	tavor_state_t	*state;
	tavor_ahhdl_t	ahhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_modify_ah);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_modify_ah_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid address handle pointer */
	if (ah == NULL) {
		TNF_PROBE_0(tavor_ci_modify_ah_invahhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (IBT_AH_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and AH handle */
	state = (tavor_state_t *)hca;
	ahhdl = (tavor_ahhdl_t)ah;

	/* Modify the AH */
	status = tavor_ah_modify(state, ahhdl, attr_p);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_modify_ah_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_modify_ah);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_modify_ah);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_qp()
 *    Allocate a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
    ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
    ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
{
	tavor_state_t		*state;
	tavor_qp_info_t		qpinfo;
	tavor_qp_options_t	op;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_qp);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Allocate the QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	qpinfo.qpi_qpn		= qpn;
	op.qpo_wq_loc		= state->ts_cfg_profile->cp_qp_wq_inddr;
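	/*
	 * Note: qpo_wq_loc is taken from the configuration profile and is
	 * understood (an assumption, based on the cp_qp_wq_inddr name) to
	 * select whether the work queues are placed in HCA-attached DDR
	 * memory or in system memory.
	 */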
	status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
		return (status);
	}

	/* Return the Tavor QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_special_qp()
 *    Allocate a Special Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
    ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_qp_hdl_t *qp_p)
{
	tavor_state_t		*state;
	tavor_qp_info_t		qpinfo;
	tavor_qp_options_t	op;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_special_qp);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_special_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Allocate the Special QP */
	qpinfo.qpi_attrp	= attr_p;
	qpinfo.qpi_type		= type;
	qpinfo.qpi_port		= port;
	qpinfo.qpi_ibt_qphdl	= ibt_qphdl;
	qpinfo.qpi_queueszp	= queue_sizes_p;
	op.qpo_wq_loc		= state->ts_cfg_profile->cp_qp_wq_inddr;
	status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_special_qp_fail, TAVOR_TNF_ERROR,
		    "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
		return (status);
	}

	/* Return the Tavor QP handle */
	*qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_special_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_qp_range()
 *    Allocate a range of Queue Pairs (not implemented by the Tavor
 *    driver; always returns IBT_NOT_SUPPORTED)
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
    ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
    ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
    ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
    ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_free_qp()
 *    Free a Queue Pair
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
{
	tavor_state_t	*state;
	tavor_qphdl_t	qphdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qp == NULL) {
		TNF_PROBE_0(tavor_ci_free_qp_invqphdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	qphdl = (tavor_qphdl_t)qp;

	/* Free the QP */
	status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_qp);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_release_qpn()
 *    Release a Queue Pair Number (QPN)
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
{
	tavor_state_t		*state;
	tavor_qpn_entry_t	*entry;

	TAVOR_TNF_ENTER(tavor_ci_release_qpn);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_release_qpn_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_release_qpn);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle pointer */
	if (qpnh == NULL) {
		TNF_PROBE_0(tavor_ci_release_qpn_invqpnhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_release_qpn);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	entry = (tavor_qpn_entry_t *)qpnh;

	/* Release the QP number */
	tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE);

	TAVOR_TNF_EXIT(tavor_ci_release_qpn);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_qp()
 *    Query a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_qp_query_attr_t *attr_p)
{
	tavor_state_t	*state;
	tavor_qphdl_t	qphdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_query_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		TNF_PROBE_0(tavor_ci_query_qp_invqphdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_qp);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	qphdl = (tavor_qphdl_t)qp;

	/* Query the QP */
	status = tavor_qp_query(state, qphdl, attr_p);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_query_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_query_qp);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_query_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_modify_qp()
 *    Modify a Queue Pair
 *    Context: Can be called from interrupt or base context.
 */
static ibt_status_t
tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
    ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
    ibt_queue_sizes_t *actual_sz)
{
	tavor_state_t	*state;
	tavor_qphdl_t	qphdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_modify_qp);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_modify_qp_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid QP handle */
	if (qp == NULL) {
		TNF_PROBE_0(tavor_ci_modify_qp_invqphdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (IBT_QP_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and QP handle */
	state = (tavor_state_t *)hca;
	qphdl = (tavor_qphdl_t)qp;

	/* Modify the QP */
	status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_modify_qp_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_modify_qp);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_modify_qp);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_cq()
 *    Allocate a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
    ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
{
	tavor_state_t	*state;
	tavor_cqhdl_t	cqhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_alloc_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_alloc_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;

	/* Allocate the CQ */
	status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
	    &cqhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_alloc_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
		return (status);
	}

	/* Return the Tavor CQ handle */
	*cq_p = (ibc_cq_hdl_t)cqhdl;

	TAVOR_TNF_EXIT(tavor_ci_alloc_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_cq()
 *    Free a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
{
	tavor_state_t	*state;
	tavor_cqhdl_t	cqhdl;
	int		status;

	TAVOR_TNF_ENTER(tavor_ci_free_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_free_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_free_cq_invcqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and CQ handle */
	state = (tavor_state_t *)hca;
	cqhdl = (tavor_cqhdl_t)cq;

	/* Free the CQ */
	status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_free_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_free_cq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_free_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_query_cq()
 *    Return the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
    uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
{
	tavor_cqhdl_t	cqhdl;

	TAVOR_TNF_ENTER(tavor_ci_query_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_query_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_query_cq_invcqhdl,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_query_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the CQ handle */
	cqhdl = (tavor_cqhdl_t)cq;

	/* Query the current CQ size */
	*entries_p = cqhdl->cq_bufsz;

	/* interrupt moderation is not supported */
	*count_p = 0;
	*usec_p = 0;
	*hid_p = 0;

	TAVOR_TNF_EXIT(tavor_ci_query_cq);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_resize_cq()
 *    Change the size of a Completion Queue
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
    uint_t *actual_size)
{
	tavor_state_t		*state;
	tavor_cqhdl_t		cqhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_resize_cq);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_resize_cq_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid CQ handle pointer */
	if (cq == NULL) {
		TNF_PROBE_0(tavor_ci_resize_cq_invcqhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (IBT_CQ_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer and CQ handle */
	state = (tavor_state_t *)hca;
	cqhdl = (tavor_cqhdl_t)cq;

	/* Resize the CQ */
	status = tavor_cq_resize(state, cqhdl, size, actual_size,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_resize_cq_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_resize_cq);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_resize_cq);
	return (IBT_SUCCESS);
}

/*
 * tavor_ci_modify_cq()
 *    CQ interrupt moderation is not supported in tavor.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
    uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
{
	return (IBT_NOT_SUPPORTED);
}

/*
 * tavor_ci_alloc_cq_sched()
 *    Reserve a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
    ibc_sched_hdl_t *sched_hdl_p)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}
	*sched_hdl_p = NULL;

	/*
	 * This is an unsupported interface for the Tavor driver.  Tavor
	 * does not support CQ scheduling classes.
	 */
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_free_cq_sched()
 *    Free a CQ scheduling class resource
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
{
	if (hca == NULL) {
		return (IBT_HCA_HDL_INVALID);
	}

	/*
	 * This is an unsupported interface for the Tavor driver.  Tavor
	 * does not support CQ scheduling classes.
	 */
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_alloc_eec()
 *    Allocate an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
    ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
{
	TAVOR_TNF_ENTER(tavor_ci_alloc_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_alloc_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_free_eec()
 *    Free an End-to-End context
 *    Context: Can be called only from user or kernel context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
{
	TAVOR_TNF_ENTER(tavor_ci_free_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_free_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_query_eec()
 *    Query an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_eec_query_attr_t *attr_p)
{
	TAVOR_TNF_ENTER(tavor_ci_query_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_query_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_modify_eec()
 *    Modify an End-to-End context
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
    ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
{
	TAVOR_TNF_ENTER(tavor_ci_modify_eec);

	/*
	 * This is an unsupported interface for the Tavor driver.  This
	 * interface is necessary to support Reliable Datagram (RD)
	 * operations.  Tavor does not support RD.
	 */

	TAVOR_TNF_EXIT(tavor_ci_modify_eec);
	return (IBT_NOT_SUPPORTED);
}


/*
 * tavor_ci_register_mr()
 *    Prepare a virtually addressed Memory Region for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
    ibt_mr_desc_t *mr_desc)
{
	tavor_mr_options_t	op;
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_register_mr);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_attr != NULL);
	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_register_mr_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_register_mr_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		TNF_PROBE_0(tavor_ci_register_mr_inv_accflags_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (IBT_MR_ACCESS_REQ_INVALID);
	}
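
	/*
	 * E.g., the check above rejects IBT_MR_ENABLE_REMOTE_WRITE
	 * requested without IBT_MR_ENABLE_LOCAL_WRITE.
	 */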

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_register_mr_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_register_mr);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
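
	/*
	 * When md_sync_required is B_TRUE, the consumer is expected to
	 * bracket DMA access with the sync entry point (tavor_ci_sync_mr()
	 * in this file); this reading of the flag is an assumption, not a
	 * statement of the IBTF contract.
	 */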

	/* Return the Tavor MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	TAVOR_TNF_EXIT(tavor_ci_register_mr);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_register_buf()
 *    Prepare a Memory Region specified by buf structure for use by an HCA
 *    Context: Can be called from interrupt or base context.
 */
/* ARGSUSED */
static ibt_status_t
tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
    ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
    ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
{
	tavor_mr_options_t	op;
	tavor_state_t		*state;
	tavor_pdhdl_t		pdhdl;
	tavor_mrhdl_t		mrhdl;
	int			status;
	ibt_mr_flags_t		flags = attrp->mr_flags;

	TAVOR_TNF_ENTER(tavor_ci_register_buf);

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))

	ASSERT(mr_p != NULL);
	ASSERT(mr_desc != NULL);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_register_buf_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid PD handle pointer */
	if (pd == NULL) {
		TNF_PROBE_0(tavor_ci_register_buf_invpdhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (IBT_PD_HDL_INVALID);
	}

	/*
	 * Validate the access flags.  Both Remote Write and Remote Atomic
	 * require the Local Write flag to be set
	 */
	if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
	    !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
		TNF_PROBE_0(tavor_ci_register_buf_accflags_inv,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (IBT_MR_ACCESS_REQ_INVALID);
	}

	/* Grab the Tavor softstate pointer and PD handle */
	state = (tavor_state_t *)hca;
	pdhdl = (tavor_pdhdl_t)pd;

	/* Register the memory region */
	op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
	op.mro_bind_dmahdl = NULL;
	op.mro_bind_override_addr = 0;
	status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_register_buf_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_register_buf);
		return (status);
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))

	/* Fill in the mr_desc structure */
	mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
	mr_desc->md_lkey  = mrhdl->mr_lkey;
	/* Only set RKey if remote access was requested */
	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
		mr_desc->md_rkey = mrhdl->mr_rkey;
	}

	/*
	 * If region is mapped for streaming (i.e. noncoherent), then set
	 * sync is required
	 */
	mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;

	/* Return the Tavor MR handle */
	*mr_p = (ibc_mr_hdl_t)mrhdl;

	TAVOR_TNF_EXIT(tavor_ci_register_buf);
	return (IBT_SUCCESS);
}


/*
 * tavor_ci_deregister_mr()
 *    Deregister a Memory Region from an HCA translation table
 *    Context: Can be called only from user or kernel context.
 */
static ibt_status_t
tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
{
	tavor_state_t		*state;
	tavor_mrhdl_t		mrhdl;
	int			status;

	TAVOR_TNF_ENTER(tavor_ci_deregister_mr);

	/* Check for valid HCA handle */
	if (hca == NULL) {
		TNF_PROBE_0(tavor_ci_deregister_mr_invhca_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
		return (IBT_HCA_HDL_INVALID);
	}

	/* Check for valid memory region handle */
	if (mr == NULL) {
		TNF_PROBE_0(tavor_ci_deregister_mr_invmrhdl_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
		return (IBT_MR_HDL_INVALID);
	}

	/* Grab the Tavor softstate pointer */
	state = (tavor_state_t *)hca;
	mrhdl = (tavor_mrhdl_t)mr;

	/*
	 * Deregister the memory region.
	 */
	status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL,
	    TAVOR_NOSLEEP);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_1(tavor_ci_deregister_mr_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
		return (status);
	}

	TAVOR_TNF_EXIT(tavor_ci_deregister_mr);
	return (IBT_SUCCESS);
}

1701 
1702 /*
1703  * tavor_ci_query_mr()
1704  *    Retrieve information about a specified Memory Region
1705  *    Context: Can be called from interrupt or base context.
1706  */
1707 static ibt_status_t
1708 tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1709     ibt_mr_query_attr_t *mr_attr)
1710 {
1711 	tavor_state_t		*state;
1712 	tavor_mrhdl_t		mrhdl;
1713 	int			status;
1714 
1715 	TAVOR_TNF_ENTER(tavor_ci_query_mr);
1716 
1717 	ASSERT(mr_attr != NULL);
1718 
1719 	/* Check for valid HCA handle */
1720 	if (hca == NULL) {
1721 		TNF_PROBE_0(tavor_ci_query_mr_invhca_fail,
1722 		    TAVOR_TNF_ERROR, "");
1723 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1724 		return (IBT_HCA_HDL_INVALID);
1725 	}
1726 
1727 	/* Check for valid memory region handle */
1728 	if (mr == NULL) {
1729 		TNF_PROBE_0(tavor_ci_query_mr_invmrhdl_fail,
1730 		    TAVOR_TNF_ERROR, "");
1731 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1732 		return (IBT_MR_HDL_INVALID);
1733 	}
1734 
1735 	/* Grab the Tavor softstate pointer and MR handle */
1736 	state = (tavor_state_t *)hca;
1737 	mrhdl = (tavor_mrhdl_t)mr;
1738 
1739 	/* Query the memory region */
1740 	status = tavor_mr_query(state, mrhdl, mr_attr);
1741 	if (status != DDI_SUCCESS) {
1742 		TNF_PROBE_1(tavor_ci_query_mr_fail, TAVOR_TNF_ERROR, "",
1743 		    tnf_uint, status, status);
1744 		TAVOR_TNF_EXIT(tavor_ci_query_mr);
1745 		return (status);
1746 	}
1747 
1748 	TAVOR_TNF_EXIT(tavor_ci_query_mr);
1749 	return (IBT_SUCCESS);
1750 }
1751 
1752 
1753 /*
1754  * tavor_ci_register_shared_mr()
1755  *    Create a shared memory region matching an existing Memory Region
1756  *    Context: Can be called from interrupt or base context.
1757  */
1758 /* ARGSUSED */
1759 static ibt_status_t
1760 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1761     ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1762     ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1763 {
1764 	tavor_state_t		*state;
1765 	tavor_pdhdl_t		pdhdl;
1766 	tavor_mrhdl_t		mrhdl, mrhdl_new;
1767 	int			status;
1768 
1769 	TAVOR_TNF_ENTER(tavor_ci_register_shared_mr);
1770 
1771 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1772 
1773 	ASSERT(mr_attr != NULL);
1774 	ASSERT(mr_p != NULL);
1775 	ASSERT(mr_desc != NULL);
1776 
1777 	/* Check for valid HCA handle */
1778 	if (hca == NULL) {
1779 		TNF_PROBE_0(tavor_ci_register_shared_mr_invhca_fail,
1780 		    TAVOR_TNF_ERROR, "");
1781 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1782 		return (IBT_HCA_HDL_INVALID);
1783 	}
1784 
1785 	/* Check for valid PD handle pointer */
1786 	if (pd == NULL) {
1787 		TNF_PROBE_0(tavor_ci_register_shared_mr_invpdhdl_fail,
1788 		    TAVOR_TNF_ERROR, "");
1789 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1790 		return (IBT_PD_HDL_INVALID);
1791 	}
1792 
1793 	/* Check for valid memory region handle */
1794 	if (mr == NULL) {
1795 		TNF_PROBE_0(tavor_ci_register_shared_mr_invmrhdl_fail,
1796 		    TAVOR_TNF_ERROR, "");
1797 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1798 		return (IBT_MR_HDL_INVALID);
1799 	}
1800 	/*
1801 	 * Validate the access flags.  Both Remote Write and Remote Atomic
1802 	 * require the Local Write flag to be set
1803 	 */
1804 	if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1805 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1806 	    !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1807 		TNF_PROBE_0(tavor_ci_register_shared_mr_accflags_inv,
1808 		    TAVOR_TNF_ERROR, "");
1809 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1810 		return (IBT_MR_ACCESS_REQ_INVALID);
1811 	}
1812 
1813 	/* Grab the Tavor softstate pointer and handles */
1814 	state = (tavor_state_t *)hca;
1815 	pdhdl = (tavor_pdhdl_t)pd;
1816 	mrhdl = (tavor_mrhdl_t)mr;
1817 
1818 	/* Register the shared memory region */
1819 	status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1820 	    &mrhdl_new);
1821 	if (status != DDI_SUCCESS) {
1822 		TNF_PROBE_1(tavor_ci_register_shared_mr_fail, TAVOR_TNF_ERROR,
1823 		    "", tnf_uint, status, status);
1824 		TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1825 		return (status);
1826 	}
1827 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1828 
1829 	/* Fill in the mr_desc structure */
1830 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1831 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1832 	/* Only set RKey if remote access was requested */
1833 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1834 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1835 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1836 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1837 	}
1838 
1839 	/*
1840 	 * If the shared region is mapped for streaming (i.e. noncoherent),
1841 	 * then flag that a sync will be required
1842 	 */
1843 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1844 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1845 
1846 	/* Return the Tavor MR handle */
1847 	*mr_p = (ibc_mr_hdl_t)mrhdl_new;
1848 
1849 	TAVOR_TNF_EXIT(tavor_ci_register_shared_mr);
1850 	return (IBT_SUCCESS);
1851 }
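
/*
 * Usage sketch for the shared-MR path above (illustrative placeholders
 * "orig_mr", "other_pd", "smr_attr", etc.; not code from this driver):
 *
 *	smr_attr.mr_flags = IBT_MR_ENABLE_LOCAL_WRITE;
 *	status = tavor_ci_register_shared_mr(hca, orig_mr, other_pd,
 *	    &smr_attr, NULL, &new_mr, &new_desc);
 *
 * The new handle references the same physical pages as "orig_mr" but
 * carries its own PD, keys, and (possibly narrower) access rights.
 */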
1852 
1853 
1854 /*
1855  * tavor_ci_reregister_mr()
1856  *    Modify the attributes of an existing Memory Region
1857  *    Context: Can be called from interrupt or base context.
1858  */
1859 /* ARGSUSED */
1860 static ibt_status_t
1861 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1862     ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1863     ibt_mr_desc_t *mr_desc)
1864 {
1865 	tavor_mr_options_t	op;
1866 	tavor_state_t		*state;
1867 	tavor_pdhdl_t		pdhdl;
1868 	tavor_mrhdl_t		mrhdl, mrhdl_new;
1869 	int			status;
1870 
1871 	TAVOR_TNF_ENTER(tavor_ci_reregister_mr);
1872 
1873 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1874 
1875 	ASSERT(mr_attr != NULL);
1876 	ASSERT(mr_new != NULL);
1877 	ASSERT(mr_desc != NULL);
1878 
1879 	/* Check for valid HCA handle */
1880 	if (hca == NULL) {
1881 		TNF_PROBE_0(tavor_ci_reregister_mr_hca_inv, TAVOR_TNF_ERROR,
1882 		    "");
1883 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1884 		return (IBT_HCA_HDL_INVALID);
1885 	}
1886 
1887 	/* Check for valid memory region handle */
1888 	if (mr == NULL) {
1889 		TNF_PROBE_0(tavor_ci_reregister_mr_invmrhdl_fail,
1890 		    TAVOR_TNF_ERROR, "");
1891 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1892 		return (IBT_MR_HDL_INVALID);
1893 	}
1894 
1895 	/* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1896 	state = (tavor_state_t *)hca;
1897 	mrhdl = (tavor_mrhdl_t)mr;
1898 	pdhdl = (tavor_pdhdl_t)pd;
1899 
1900 	/* Reregister the memory region */
1901 	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1902 	status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1903 	    &mrhdl_new, &op);
1904 	if (status != DDI_SUCCESS) {
1905 		TNF_PROBE_1(tavor_ci_reregister_mr_fail, TAVOR_TNF_ERROR, "",
1906 		    tnf_uint, status, status);
1907 		TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1908 		return (status);
1909 	}
1910 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1911 
1912 	/* Fill in the mr_desc structure */
1913 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1914 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1915 	/* Only set RKey if remote access was requested */
1916 	if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1917 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1918 	    (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1919 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
1920 	}
1921 
1922 	/*
1923 	 * If the region is mapped for streaming (i.e. noncoherent), then
1924 	 * flag that a sync will be required
1925 	 */
1926 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1927 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1928 
1929 	/* Return the Tavor MR handle */
1930 	*mr_new = (ibc_mr_hdl_t)mrhdl_new;
1931 
1932 	TAVOR_TNF_EXIT(tavor_ci_reregister_mr);
1933 	return (IBT_SUCCESS);
1934 }
1935 
1936 
1937 /*
1938  * tavor_ci_reregister_buf()
1939  *    Modify the attributes of an existing Memory Region
1940  *    Context: Can be called from interrupt or base context.
1941  */
1942 /* ARGSUSED */
1943 static ibt_status_t
1944 tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1945     ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1946     ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1947 {
1948 	tavor_mr_options_t	op;
1949 	tavor_state_t		*state;
1950 	tavor_pdhdl_t		pdhdl;
1951 	tavor_mrhdl_t		mrhdl, mrhdl_new;
1952 	int			status;
1953 	ibt_mr_flags_t		flags = attrp->mr_flags;
1954 
1955 	TAVOR_TNF_ENTER(tavor_ci_reregister_buf);
1956 
1957 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1958 
1959 	ASSERT(mr_new != NULL);
1960 	ASSERT(mr_desc != NULL);
1961 
1962 	/* Check for valid HCA handle */
1963 	if (hca == NULL) {
1964 		TNF_PROBE_0(tavor_ci_reregister_buf_hca_inv, TAVOR_TNF_ERROR,
1965 		    "");
1966 		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1967 		return (IBT_HCA_HDL_INVALID);
1968 	}
1969 
1970 	/* Check for valid memory region handle */
1971 	if (mr == NULL) {
1972 		TNF_PROBE_0(tavor_ci_reregister_buf_invmrhdl_fail,
1973 		    TAVOR_TNF_ERROR, "");
1974 		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1975 		return (IBT_MR_HDL_INVALID);
1976 	}
1977 
1978 	/* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1979 	state = (tavor_state_t *)hca;
1980 	mrhdl = (tavor_mrhdl_t)mr;
1981 	pdhdl = (tavor_pdhdl_t)pd;
1982 
1983 	/* Reregister the memory region */
1984 	op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1985 	status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1986 	    &mrhdl_new, &op);
1987 	if (status != DDI_SUCCESS) {
1988 		TNF_PROBE_1(tavor_ci_reregister_buf_fail, TAVOR_TNF_ERROR, "",
1989 		    tnf_uint, status, status);
1990 		TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
1991 		return (status);
1992 	}
1993 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1994 
1995 	/* Fill in the mr_desc structure */
1996 	mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1997 	mr_desc->md_lkey  = mrhdl_new->mr_lkey;
1998 	/* Only set RKey if remote access was requested */
1999 	if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2000 	    (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2001 	    (flags & IBT_MR_ENABLE_REMOTE_READ)) {
2002 		mr_desc->md_rkey = mrhdl_new->mr_rkey;
2003 	}
2004 
2005 	/*
2006 	 * If the region is mapped for streaming (i.e. noncoherent), then
2007 	 * flag that a sync will be required
2008 	 */
2009 	mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
2010 	    IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2011 
2012 	/* Return the Tavor MR handle */
2013 	*mr_new = (ibc_mr_hdl_t)mrhdl_new;
2014 
2015 	TAVOR_TNF_EXIT(tavor_ci_reregister_buf);
2016 	return (IBT_SUCCESS);
2017 }
2018 
2019 /*
2020  * tavor_ci_sync_mr()
2021  *    Synchronize access to a Memory Region
2022  *    Context: Can be called from interrupt or base context.
2023  */
2024 static ibt_status_t
2025 tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
2026 {
2027 	tavor_state_t		*state;
2028 	int			status;
2029 
2030 	TAVOR_TNF_ENTER(tavor_ci_sync_mr);
2031 
2032 	ASSERT(mr_segs != NULL);
2033 
2034 	/* Check for valid HCA handle */
2035 	if (hca == NULL) {
2036 		TNF_PROBE_0(tavor_ci_sync_mr_invhca_fail,
2037 		    TAVOR_TNF_ERROR, "");
2038 		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2039 		return (IBT_HCA_HDL_INVALID);
2040 	}
2041 
2042 	/* Grab the Tavor softstate pointer */
2043 	state = (tavor_state_t *)hca;
2044 
2045 	/* Sync the memory region */
2046 	status = tavor_mr_sync(state, mr_segs, num_segs);
2047 	if (status != DDI_SUCCESS) {
2048 		TNF_PROBE_1(tavor_ci_sync_mr_fail, TAVOR_TNF_ERROR, "",
2049 		    tnf_uint, status, status);
2050 		TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2051 		return (status);
2052 	}
2053 
2054 	TAVOR_TNF_EXIT(tavor_ci_sync_mr);
2055 	return (IBT_SUCCESS);
2056 }
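
/*
 * A minimal caller-side sketch for the sync entry point above (field
 * names assumed from the IBTF ibt_mr_sync_t definition; "mrhdl",
 * "vaddr", and "len" are illustrative placeholders):
 *
 *	ibt_mr_sync_t	seg;
 *
 *	seg.ms_handle = mrhdl;		(region with md_sync_required set)
 *	seg.ms_vaddr  = vaddr;		(start of the range just DMA'd)
 *	seg.ms_len    = len;		(length of that range)
 *	seg.ms_flags  = IBT_SYNC_READ;	(CPU is about to read HCA data)
 *	status = tavor_ci_sync_mr(hca, &seg, 1);
 */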
2057 
2058 
2059 /*
2060  * tavor_ci_alloc_mw()
2061  *    Allocate a Memory Window
2062  *    Context: Can be called from interrupt or base context.
2063  */
2064 static ibt_status_t
2065 tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
2066     ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
2067 {
2068 	tavor_state_t		*state;
2069 	tavor_pdhdl_t		pdhdl;
2070 	tavor_mwhdl_t		mwhdl;
2071 	int			status;
2072 
2073 	TAVOR_TNF_ENTER(tavor_ci_alloc_mw);
2074 
2075 	ASSERT(mw_p != NULL);
2076 	ASSERT(rkey_p != NULL);
2077 
2078 	/* Check for valid HCA handle */
2079 	if (hca == NULL) {
2080 		TNF_PROBE_0(tavor_ci_alloc_mw_invhca_fail,
2081 		    TAVOR_TNF_ERROR, "");
2082 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2083 		return (IBT_HCA_HDL_INVALID);
2084 	}
2085 
2086 	/* Check for valid PD handle pointer */
2087 	if (pd == NULL) {
2088 		TNF_PROBE_0(tavor_ci_alloc_mw_invpdhdl_fail,
2089 		    TAVOR_TNF_ERROR, "");
2090 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2091 		return (IBT_PD_HDL_INVALID);
2092 	}
2093 
2094 	/* Grab the Tavor softstate pointer and PD handle */
2095 	state = (tavor_state_t *)hca;
2096 	pdhdl = (tavor_pdhdl_t)pd;
2097 
2098 	/* Allocate the memory window */
2099 	status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
2100 	if (status != DDI_SUCCESS) {
2101 		TNF_PROBE_1(tavor_ci_alloc_mw_fail, TAVOR_TNF_ERROR, "",
2102 		    tnf_uint, status, status);
2103 		TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2104 		return (status);
2105 	}
2106 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
2107 
2108 	/* Return the MW handle and RKey */
2109 	*mw_p = (ibc_mw_hdl_t)mwhdl;
2110 	*rkey_p = mwhdl->mr_rkey;
2111 
2112 	TAVOR_TNF_EXIT(tavor_ci_alloc_mw);
2113 	return (IBT_SUCCESS);
2114 }
2115 
2116 
2117 /*
2118  * tavor_ci_free_mw()
2119  *    Free a Memory Window
2120  *    Context: Can be called from interrupt or base context.
2121  */
2122 static ibt_status_t
2123 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
2124 {
2125 	tavor_state_t		*state;
2126 	tavor_mwhdl_t		mwhdl;
2127 	int			status;
2128 
2129 	TAVOR_TNF_ENTER(tavor_ci_free_mw);
2130 
2131 	/* Check for valid HCA handle */
2132 	if (hca == NULL) {
2133 		TNF_PROBE_0(tavor_ci_free_mw_invhca_fail,
2134 		    TAVOR_TNF_ERROR, "");
2135 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2136 		return (IBT_HCA_HDL_INVALID);
2137 	}
2138 
2139 	/* Check for valid MW handle */
2140 	if (mw == NULL) {
2141 		TNF_PROBE_0(tavor_ci_free_mw_invmwhdl_fail,
2142 		    TAVOR_TNF_ERROR, "");
2143 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2144 		return (IBT_MW_HDL_INVALID);
2145 	}
2146 
2147 	/* Grab the Tavor softstate pointer and MW handle */
2148 	state = (tavor_state_t *)hca;
2149 	mwhdl = (tavor_mwhdl_t)mw;
2150 
2151 	/* Free the memory window */
2152 	status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP);
2153 	if (status != DDI_SUCCESS) {
2154 		TNF_PROBE_1(tavor_ci_free_mw_fail, TAVOR_TNF_ERROR, "",
2155 		    tnf_uint, status, status);
2156 		TAVOR_TNF_EXIT(tavor_ci_free_mw);
2157 		return (status);
2158 	}
2159 
2160 	TAVOR_TNF_EXIT(tavor_ci_free_mw);
2161 	return (IBT_SUCCESS);
2162 }
2163 
2164 
2165 /*
2166  * tavor_ci_query_mw()
2167  *    Return the attributes of the specified Memory Window
2168  *    Context: Can be called from interrupt or base context.
2169  */
2170 static ibt_status_t
2171 tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
2172     ibt_mw_query_attr_t *mw_attr_p)
2173 {
2174 	tavor_mwhdl_t		mwhdl;
2175 
2176 	TAVOR_TNF_ENTER(tavor_ci_query_mw);
2177 
2178 	ASSERT(mw_attr_p != NULL);
2179 
2180 	/* Check for valid HCA handle */
2181 	if (hca == NULL) {
2182 		TNF_PROBE_0(tavor_ci_query_mw_invhca_fail,
2183 		    TAVOR_TNF_ERROR, "");
2184 		TAVOR_TNF_EXIT(tavor_ci_query_mw);
2185 		return (IBT_HCA_HDL_INVALID);
2186 	}
2187 
2188 	/* Check for valid MemWin handle */
2189 	if (mw == NULL) {
2190 		TNF_PROBE_0(tavor_ci_query_mw_invmwhdl_fail,
2191 		    TAVOR_TNF_ERROR, "");
2192 		TAVOR_TNF_EXIT(tavor_ci_query_mw);
2193 		return (IBT_MW_HDL_INVALID);
2194 	}
2195 
2196 	/* Query the memory window pointer and fill in the return values */
2197 	mwhdl = (tavor_mwhdl_t)mw;
2198 	mutex_enter(&mwhdl->mr_lock);
2199 	mw_attr_p->mw_pd   = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
2200 	mw_attr_p->mw_rkey = mwhdl->mr_rkey;
2201 	mutex_exit(&mwhdl->mr_lock);
2202 
2203 	TAVOR_TNF_EXIT(tavor_ci_query_mw);
2204 	return (IBT_SUCCESS);
2205 }
2206 
2207 
2208 /* ARGSUSED */
2209 static ibt_status_t
2210 tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2211     ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2212     ibt_mr_desc_t *mr_desc)
2213 {
2214 	tavor_state_t		*state;
2215 	tavor_pdhdl_t		pdhdl;
2216 	tavor_mrhdl_t		mrhdl;
2217 	int			status;
2218 
2219 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
2220 
2221 	ASSERT(mr_attr != NULL);
2222 	ASSERT(mr_p != NULL);
2223 	ASSERT(mr_desc != NULL);
2224 
2225 	/* Check for valid HCA handle */
2226 	if (hca == NULL) {
2227 		return (IBT_HCA_HDL_INVALID);
2228 	}
2229 
2230 	/* Check for valid PD handle pointer */
2231 	if (pd == NULL) {
2232 		return (IBT_PD_HDL_INVALID);
2233 	}
2234 
2235 	/*
2236 	 * Validate the access flags.  Both Remote Write and Remote Atomic
2237 	 * require the Local Write flag to be set
2238 	 */
2239 	if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2240 	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2241 	    !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2242 		return (IBT_MR_ACCESS_REQ_INVALID);
2243 	}
2244 
2245 	/* Grab the Tavor softstate pointer and PD handle */
2246 	state = (tavor_state_t *)hca;
2247 	pdhdl = (tavor_pdhdl_t)pd;
2248 
2249 	status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
2250 	if (status != DDI_SUCCESS) {
2251 		return (status);
2252 	}
2253 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2254 
2255 	/* Fill in the mr_desc structure */
2256 	mr_desc->md_vaddr = mr_attr->dmr_paddr;
2257 	mr_desc->md_lkey  = mrhdl->mr_lkey;
2258 	/* Only set RKey if remote access was requested */
2259 	if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
2260 	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2261 	    (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
2262 		mr_desc->md_rkey = mrhdl->mr_rkey;
2263 	}
2264 
2265 	/*
2266 	 * DMA memory regions never require an explicit sync, so always
2267 	 * clear the sync-required flag
2268 	 */
2269 	mr_desc->md_sync_required = B_FALSE;
2270 
2271 	/* Return the Tavor MR handle */
2272 	*mr_p = (ibc_mr_hdl_t)mrhdl;
2273 
2274 	return (IBT_SUCCESS);
2275 }
2276 
2277 
2278 /*
2279  * tavor_ci_attach_mcg()
2280  *    Attach a Queue Pair to a Multicast Group
2281  *    Context: Can be called only from user or kernel context.
2282  */
2283 static ibt_status_t
2284 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2285     ib_lid_t lid)
2286 {
2287 	tavor_state_t		*state;
2288 	tavor_qphdl_t		qphdl;
2289 	int			status;
2290 
2291 	TAVOR_TNF_ENTER(tavor_ci_attach_mcg);
2292 
2293 	/* Check for valid HCA handle */
2294 	if (hca == NULL) {
2295 		TNF_PROBE_0(tavor_ci_attach_mcg_invhca_fail,
2296 		    TAVOR_TNF_ERROR, "");
2297 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2298 		return (IBT_HCA_HDL_INVALID);
2299 	}
2300 
2301 	/* Check for valid QP handle pointer */
2302 	if (qp == NULL) {
2303 		TNF_PROBE_0(tavor_ci_attach_mcg_invqphdl_fail,
2304 		    TAVOR_TNF_ERROR, "");
2305 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2306 		return (IBT_QP_HDL_INVALID);
2307 	}
2308 
2309 	/* Grab the Tavor softstate pointer and QP handles */
2310 	state = (tavor_state_t *)hca;
2311 	qphdl = (tavor_qphdl_t)qp;
2312 
2313 	/* Attach the QP to the multicast group */
2314 	status = tavor_mcg_attach(state, qphdl, gid, lid);
2315 	if (status != DDI_SUCCESS) {
2316 		TNF_PROBE_1(tavor_ci_attach_mcg_fail, TAVOR_TNF_ERROR, "",
2317 		    tnf_uint, status, status);
2318 		TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2319 		return (status);
2320 	}
2321 
2322 	TAVOR_TNF_EXIT(tavor_ci_attach_mcg);
2323 	return (IBT_SUCCESS);
2324 }
2325 
2326 
2327 /*
2328  * tavor_ci_detach_mcg()
2329  *    Detach a Queue Pair from a Multicast Group
2330  *    Context: Can be called only from user or kernel context.
2331  */
2332 static ibt_status_t
2333 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
2334     ib_lid_t lid)
2335 {
2336 	tavor_state_t		*state;
2337 	tavor_qphdl_t		qphdl;
2338 	int			status;
2339 
2340 	TAVOR_TNF_ENTER(tavor_ci_detach_mcg);
2341 
2342 	/* Check for valid HCA handle */
2343 	if (hca == NULL) {
2344 		TNF_PROBE_0(tavor_ci_detach_mcg_invhca_fail,
2345 		    TAVOR_TNF_ERROR, "");
2346 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2347 		return (IBT_HCA_HDL_INVALID);
2348 	}
2349 
2350 	/* Check for valid QP handle pointer */
2351 	if (qp == NULL) {
2352 		TNF_PROBE_0(tavor_ci_detach_mcg_invqphdl_fail,
2353 		    TAVOR_TNF_ERROR, "");
2354 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2355 		return (IBT_QP_HDL_INVALID);
2356 	}
2357 
2358 	/* Grab the Tavor softstate pointer and QP handle */
2359 	state = (tavor_state_t *)hca;
2360 	qphdl = (tavor_qphdl_t)qp;
2361 
2362 	/* Detach the QP from the multicast group */
2363 	status = tavor_mcg_detach(state, qphdl, gid, lid);
2364 	if (status != DDI_SUCCESS) {
2365 		TNF_PROBE_1(tavor_ci_detach_mcg_fail, TAVOR_TNF_ERROR, "",
2366 		    tnf_uint, status, status);
2367 		TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2368 		return (status);
2369 	}
2370 
2371 	TAVOR_TNF_EXIT(tavor_ci_detach_mcg);
2372 	return (IBT_SUCCESS);
2373 }
2374 
2375 
2376 /*
2377  * tavor_ci_post_send()
2378  *    Post send work requests to the send queue on the specified QP
2379  *    Context: Can be called from interrupt or base context.
2380  */
2381 static ibt_status_t
2382 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
2383     uint_t num_wr, uint_t *num_posted_p)
2384 {
2385 	tavor_state_t		*state;
2386 	tavor_qphdl_t		qphdl;
2387 	int			status;
2388 
2389 	TAVOR_TNF_ENTER(tavor_ci_post_send);
2390 
2391 	ASSERT(wr_p != NULL);
2392 	ASSERT(num_wr != 0);
2393 
2394 	/* Check for valid HCA handle */
2395 	if (hca == NULL) {
2396 		TNF_PROBE_0(tavor_ci_post_send_invhca_fail,
2397 		    TAVOR_TNF_ERROR, "");
2398 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2399 		return (IBT_HCA_HDL_INVALID);
2400 	}
2401 
2402 	/* Check for valid QP handle pointer */
2403 	if (qp == NULL) {
2404 		TNF_PROBE_0(tavor_ci_post_send_invqphdl_fail,
2405 		    TAVOR_TNF_ERROR, "");
2406 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2407 		return (IBT_QP_HDL_INVALID);
2408 	}
2409 
2410 	/* Grab the Tavor softstate pointer and QP handle */
2411 	state = (tavor_state_t *)hca;
2412 	qphdl = (tavor_qphdl_t)qp;
2413 
2414 	/* Post the send WQEs */
2415 	status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
2416 	if (status != DDI_SUCCESS) {
2417 		TNF_PROBE_1(tavor_ci_post_send_fail, TAVOR_TNF_ERROR, "",
2418 		    tnf_uint, status, status);
2419 		TAVOR_TNF_EXIT(tavor_ci_post_send);
2420 		return (status);
2421 	}
2422 
2423 	TAVOR_TNF_EXIT(tavor_ci_post_send);
2424 	return (IBT_SUCCESS);
2425 }
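
/*
 * Note on partial posts (a sketch of the intended caller pattern, not
 * a normative contract): when tavor_post_send() fails partway through
 * a chain of "num_wr" requests, *num_posted_p still reflects how many
 * WRs were accepted, so a caller can resume from the first unposted
 * entry:
 *
 *	status = tavor_ci_post_send(hca, qp, wr_p, num_wr, &posted);
 *	if (status != IBT_SUCCESS && posted < num_wr)
 *		(retry or fail starting at wr_p[posted])
 */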
2426 
2427 
2428 /*
2429  * tavor_ci_post_recv()
2430  *    Post receive work requests to the receive queue on the specified QP
2431  *    Context: Can be called from interrupt or base context.
2432  */
2433 static ibt_status_t
2434 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
2435     uint_t num_wr, uint_t *num_posted_p)
2436 {
2437 	tavor_state_t		*state;
2438 	tavor_qphdl_t		qphdl;
2439 	int			status;
2440 
2441 	TAVOR_TNF_ENTER(tavor_ci_post_recv);
2442 
2443 	ASSERT(wr_p != NULL);
2444 	ASSERT(num_wr != 0);
2445 
2446 	/* Check for valid HCA handle */
2447 	if (hca == NULL) {
2448 		TNF_PROBE_0(tavor_ci_post_recv_invhca_fail,
2449 		    TAVOR_TNF_ERROR, "");
2450 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2451 		return (IBT_HCA_HDL_INVALID);
2452 	}
2453 
2454 	/* Check for valid QP handle pointer */
2455 	if (qp == NULL) {
2456 		TNF_PROBE_0(tavor_ci_post_recv_invqphdl_fail,
2457 		    TAVOR_TNF_ERROR, "");
2458 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2459 		return (IBT_QP_HDL_INVALID);
2460 	}
2461 
2462 	/* Grab the Tavor softstate pointer and QP handle */
2463 	state = (tavor_state_t *)hca;
2464 	qphdl = (tavor_qphdl_t)qp;
2465 
2466 	/* Post the receive WQEs */
2467 	status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
2468 	if (status != DDI_SUCCESS) {
2469 		TNF_PROBE_1(tavor_ci_post_recv_fail, TAVOR_TNF_ERROR, "",
2470 		    tnf_uint, status, status);
2471 		TAVOR_TNF_EXIT(tavor_ci_post_recv);
2472 		return (status);
2473 	}
2474 
2475 	TAVOR_TNF_EXIT(tavor_ci_post_recv);
2476 	return (IBT_SUCCESS);
2477 }
2478 
2479 
2480 /*
2481  * tavor_ci_poll_cq()
2482  *    Poll for a work request completion
2483  *    Context: Can be called from interrupt or base context.
2484  */
2485 static ibt_status_t
2486 tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
2487     uint_t num_wc, uint_t *num_polled)
2488 {
2489 	tavor_state_t		*state;
2490 	tavor_cqhdl_t		cqhdl;
2491 	uint_t			polled;
2492 	int			status;
2493 
2494 	TAVOR_TNF_ENTER(tavor_ci_poll_cq);
2495 
2496 	ASSERT(wc_p != NULL);
2497 
2498 	/* Check for valid HCA handle */
2499 	if (hca == NULL) {
2500 		TNF_PROBE_0(tavor_ci_poll_cq_invhca_fail,
2501 		    TAVOR_TNF_ERROR, "");
2502 		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2503 		return (IBT_HCA_HDL_INVALID);
2504 	}
2505 
2506 	/* Check for valid CQ handle pointer */
2507 	if (cq == NULL) {
2508 		TNF_PROBE_0(tavor_ci_poll_cq_invcqhdl_fail,
2509 		    TAVOR_TNF_ERROR, "");
2510 		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2511 		return (IBT_CQ_HDL_INVALID);
2512 	}
2513 
2514 	/* Check for valid num_wc field */
2515 	if (num_wc == 0) {
2516 		TNF_PROBE_0(tavor_ci_poll_cq_num_wc_fail,
2517 		    TAVOR_TNF_ERROR, "");
2518 		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2519 		return (IBT_INVALID_PARAM);
2520 	}
2521 
2522 	/* Grab the Tavor softstate pointer and CQ handle */
2523 	state = (tavor_state_t *)hca;
2524 	cqhdl = (tavor_cqhdl_t)cq;
2525 
2526 	/* Poll for work request completions */
2527 	status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled);
2528 
2529 	/* First fill in "num_polled" argument (only when valid) */
2530 	if (num_polled) {
2531 		*num_polled = polled;
2532 	}
2533 
2534 	/*
2535 	 * Check the status code:
2536 	 *   If empty, we return empty.
2537 	 *   If error, we fire an error probe and then return.
2538 	 *   If success (something was polled), we return success.
2539 	 */
2540 	if (status != DDI_SUCCESS) {
2541 		if (status != IBT_CQ_EMPTY) {
2542 			TNF_PROBE_1(tavor_ci_poll_cq_fail, TAVOR_TNF_ERROR, "",
2543 			    tnf_uint, status, status);
2544 		}
2545 		TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2546 		return (status);
2547 	}
2548 
2549 	TAVOR_TNF_EXIT(tavor_ci_poll_cq);
2550 	return (IBT_SUCCESS);
2551 }
2552 
2553 
2554 /*
2555  * tavor_ci_notify_cq()
2556  *    Enable notification events on the specified CQ
2557  *    Context: Can be called from interrupt or base context.
2558  */
2559 static ibt_status_t
2560 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
2561     ibt_cq_notify_flags_t flags)
2562 {
2563 	tavor_state_t		*state;
2564 	tavor_cqhdl_t		cqhdl;
2565 	int			status;
2566 
2567 	TAVOR_TNF_ENTER(tavor_ci_notify_cq);
2568 
2569 	/* Check for valid HCA handle */
2570 	if (hca == NULL) {
2571 		TNF_PROBE_0(tavor_ci_notify_cq_invhca_fail,
2572 		    TAVOR_TNF_ERROR, "");
2573 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2574 		return (IBT_HCA_HDL_INVALID);
2575 	}
2576 
2577 	/* Check for valid CQ handle pointer */
2578 	if (cq_hdl == NULL) {
2579 		TNF_PROBE_0(tavor_ci_notify_cq_invcqhdl_fail,
2580 		    TAVOR_TNF_ERROR, "");
2581 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2582 		return (IBT_CQ_HDL_INVALID);
2583 	}
2584 
2585 	/* Grab the Tavor softstate pointer and CQ handle */
2586 	state = (tavor_state_t *)hca;
2587 	cqhdl = (tavor_cqhdl_t)cq_hdl;
2588 
2589 	/* Enable the CQ notification */
2590 	status = tavor_cq_notify(state, cqhdl, flags);
2591 	if (status != DDI_SUCCESS) {
2592 		TNF_PROBE_1(tavor_ci_notify_cq_fail, TAVOR_TNF_ERROR, "",
2593 		    tnf_uint, status, status);
2594 		TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2595 		return (status);
2596 	}
2597 
2598 	TAVOR_TNF_EXIT(tavor_ci_notify_cq);
2599 	return (IBT_SUCCESS);
2600 }
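
/*
 * A common consumer pattern for the poll/notify pair above (sketch
 * only, not code from this driver): drain until IBT_CQ_EMPTY, arm the
 * CQ, then poll once more to close the race where a completion lands
 * between the last empty poll and the notify:
 *
 *	while (tavor_ci_poll_cq(hca, cq, wcs, nwc, &n) == IBT_SUCCESS)
 *		(process "n" completions);
 *	(void) tavor_ci_notify_cq(hca, cq, IBT_NEXT_COMPLETION);
 *	if (tavor_ci_poll_cq(hca, cq, wcs, nwc, &n) == IBT_SUCCESS)
 *		(process the late arrivals);
 */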
2601 
2602 /*
2603  * tavor_ci_ci_data_in()
2604  *    Exchange CI-specific data.
2605  *    Context: Can be called only from user or kernel context.
2606  */
2607 static ibt_status_t
2608 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2609     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2610     size_t data_sz)
2611 {
2612 	tavor_state_t		*state;
2613 	int			status;
2614 
2615 	TAVOR_TNF_ENTER(tavor_ci_ci_data_in);
2616 
2617 	/* Check for valid HCA handle */
2618 	if (hca == NULL) {
2619 		TNF_PROBE_0(tavor_ci_ci_data_in_invhca_fail,
2620 		    TAVOR_TNF_ERROR, "");
2621 		TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2622 		return (IBT_HCA_HDL_INVALID);
2623 	}
2624 
2625 	/* Grab the Tavor softstate pointer */
2626 	state = (tavor_state_t *)hca;
2627 
2628 	/* Get the Tavor userland mapping information */
2629 	status = tavor_umap_ci_data_in(state, flags, object,
2630 	    ibc_object_handle, data_p, data_sz);
2631 	if (status != DDI_SUCCESS) {
2632 		TNF_PROBE_1(tavor_ci_ci_data_in_umap_fail, TAVOR_TNF_ERROR,
2633 		    "", tnf_uint, status, status);
2634 		TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2635 		return (status);
2636 	}
2637 
2638 	TAVOR_TNF_EXIT(tavor_ci_ci_data_in);
2639 	return (IBT_SUCCESS);
2640 }
2641 
2642 /*
2643  * tavor_ci_ci_data_out()
2644  *    Exchange CI-specific data.
2645  *    Context: Can be called only from user or kernel context.
2646  */
2647 static ibt_status_t
2648 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2649     ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2650     size_t data_sz)
2651 {
2652 	tavor_state_t		*state;
2653 	int			status;
2654 
2655 	TAVOR_TNF_ENTER(tavor_ci_ci_data_out);
2656 
2657 	/* Check for valid HCA handle */
2658 	if (hca == NULL) {
2659 		TNF_PROBE_0(tavor_ci_ci_data_out_invhca_fail,
2660 		    TAVOR_TNF_ERROR, "");
2661 		TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2662 		return (IBT_HCA_HDL_INVALID);
2663 	}
2664 
2665 	/* Grab the Tavor softstate pointer */
2666 	state = (tavor_state_t *)hca;
2667 
2668 	/* Get the Tavor userland mapping information */
2669 	status = tavor_umap_ci_data_out(state, flags, object,
2670 	    ibc_object_handle, data_p, data_sz);
2671 	if (status != DDI_SUCCESS) {
2672 		TNF_PROBE_1(tavor_ci_ci_data_out_umap_fail, TAVOR_TNF_ERROR,
2673 		    "", tnf_uint, status, status);
2674 		TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2675 		return (status);
2676 	}
2677 
2678 	TAVOR_TNF_EXIT(tavor_ci_ci_data_out);
2679 	return (IBT_SUCCESS);
2680 }
2681 
2682 
2683 /*
2684  * tavor_ci_alloc_srq()
2685  *    Allocate a Shared Receive Queue (SRQ)
2686  *    Context: Can be called only from user or kernel context
2687  */
2688 static ibt_status_t
2689 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2690     ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2691     ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2692 {
2693 	tavor_state_t		*state;
2694 	tavor_pdhdl_t		pdhdl;
2695 	tavor_srqhdl_t		srqhdl;
2696 	tavor_srq_info_t	srqinfo;
2697 	tavor_srq_options_t	op;
2698 	int			status;
2699 
2700 	TAVOR_TNF_ENTER(tavor_ci_alloc_srq);
2701 
2702 	/* Check for valid HCA handle */
2703 	if (hca == NULL) {
2704 		TNF_PROBE_0(tavor_ci_alloc_srq_invhca_fail,
2705 		    TAVOR_TNF_ERROR, "");
2706 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2707 		return (IBT_HCA_HDL_INVALID);
2708 	}
2709 
2710 	state = (tavor_state_t *)hca;
2711 
2712 	/* Check if SRQ is even supported */
2713 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2714 		TNF_PROBE_0(tavor_ci_alloc_srq_not_supported_fail,
2715 		    TAVOR_TNF_ERROR, "");
2716 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2717 		return (IBT_NOT_SUPPORTED);
2718 	}
2719 
2720 	/* Check for valid PD handle pointer */
2721 	if (pd == NULL) {
2722 		TNF_PROBE_0(tavor_ci_alloc_srq_invpdhdl_fail,
2723 		    TAVOR_TNF_ERROR, "");
2724 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2725 		return (IBT_PD_HDL_INVALID);
2726 	}
2727 
2728 	pdhdl = (tavor_pdhdl_t)pd;
2729 
2730 	srqinfo.srqi_ibt_srqhdl = ibt_srq;
2731 	srqinfo.srqi_pd		= pdhdl;
2732 	srqinfo.srqi_sizes	= sizes;
2733 	srqinfo.srqi_real_sizes	= ret_sizes_p;
2734 	srqinfo.srqi_srqhdl	= &srqhdl;
2735 	srqinfo.srqi_flags	= flags;
2736 	op.srqo_wq_loc		= state->ts_cfg_profile->cp_srq_wq_inddr;
2737 	status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op);
2738 	if (status != DDI_SUCCESS) {
2739 		TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2740 		return (status);
2741 	}
2742 
2743 	*ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2744 
2745 	TAVOR_TNF_EXIT(tavor_ci_alloc_srq);
2746 	return (IBT_SUCCESS);
2747 }
2748 
2749 /*
2750  * tavor_ci_free_srq()
2751  *    Free a Shared Receive Queue (SRQ)
2752  *    Context: Can be called only from user or kernel context
2753  */
2754 static ibt_status_t
2755 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2756 {
2757 	tavor_state_t	*state;
2758 	tavor_srqhdl_t	srqhdl;
2759 	int		status;
2760 
2761 	TAVOR_TNF_ENTER(tavor_ci_free_srq);
2762 
2763 	/* Check for valid HCA handle */
2764 	if (hca == NULL) {
2765 		TNF_PROBE_0(tavor_ci_free_srq_invhca_fail,
2766 		    TAVOR_TNF_ERROR, "");
2767 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2768 		return (IBT_HCA_HDL_INVALID);
2769 	}
2770 
2771 	state = (tavor_state_t *)hca;
2772 
2773 	/* Check if SRQ is even supported */
2774 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2775 		TNF_PROBE_0(tavor_ci_free_srq_not_supported_fail,
2776 		    TAVOR_TNF_ERROR, "");
2777 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2778 		return (IBT_NOT_SUPPORTED);
2779 	}
2780 
2781 	/* Check for valid SRQ handle pointer */
2782 	if (srq == NULL) {
2783 		TNF_PROBE_0(tavor_ci_free_srq_invsrqhdl_fail,
2784 		    TAVOR_TNF_ERROR, "");
2785 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2786 		return (IBT_SRQ_HDL_INVALID);
2787 	}
2788 
2789 	srqhdl = (tavor_srqhdl_t)srq;
2790 
2791 	/* Free the SRQ */
2792 	status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP);
2793 	if (status != DDI_SUCCESS) {
2794 		TNF_PROBE_1(tavor_ci_free_srq_fail, TAVOR_TNF_ERROR, "",
2795 		    tnf_uint, status, status);
2796 		TAVOR_TNF_EXIT(tavor_ci_free_srq);
2797 		return (status);
2798 	}
2799 
2800 	TAVOR_TNF_EXIT(tavor_ci_free_srq);
2801 	return (IBT_SUCCESS);
2802 }
2803 
2804 /*
2805  * tavor_ci_query_srq()
2806  *    Query properties of a Shared Receive Queue (SRQ)
2807  *    Context: Can be called from interrupt or base context.
2808  */
2809 static ibt_status_t
2810 tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2811     ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2812 {
2813 	tavor_state_t	*state;
2814 	tavor_srqhdl_t	srqhdl;
2815 
2816 	TAVOR_TNF_ENTER(tavor_ci_query_srq);
2817 
2818 	/* Check for valid HCA handle */
2819 	if (hca == NULL) {
2820 		TNF_PROBE_0(tavor_ci_query_srq_invhca_fail,
2821 		    TAVOR_TNF_ERROR, "");
2822 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2823 		return (IBT_HCA_HDL_INVALID);
2824 	}
2825 
2826 	state = (tavor_state_t *)hca;
2827 
2828 	/* Check if SRQ is even supported */
2829 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2830 		TNF_PROBE_0(tavor_ci_query_srq_not_supported_fail,
2831 		    TAVOR_TNF_ERROR, "");
2832 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2833 		return (IBT_NOT_SUPPORTED);
2834 	}
2835 
2836 	/* Check for valid SRQ handle pointer */
2837 	if (srq == NULL) {
2838 		TNF_PROBE_0(tavor_ci_query_srq_invsrqhdl_fail,
2839 		    TAVOR_TNF_ERROR, "");
2840 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2841 		return (IBT_SRQ_HDL_INVALID);
2842 	}
2843 
2844 	srqhdl = (tavor_srqhdl_t)srq;
2845 
2846 	mutex_enter(&srqhdl->srq_lock);
2847 	if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2848 		mutex_exit(&srqhdl->srq_lock);
2849 		TNF_PROBE_0(tavor_ci_query_srq_error_state,
2850 		    TAVOR_TNF_ERROR, "");
2851 		TAVOR_TNF_EXIT(tavor_ci_query_srq);
2852 		return (IBT_SRQ_ERROR_STATE);
2853 	}
2854 
2855 	*pd_p   = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2856 	sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz;
2857 	sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2858 	mutex_exit(&srqhdl->srq_lock);
2859 	*limit_p  = 0;
2860 
2861 	TAVOR_TNF_EXIT(tavor_ci_query_srq);
2862 	return (IBT_SUCCESS);
2863 }
2864 
2865 /*
2866  * tavor_ci_modify_srq()
2867  *    Modify properties of a Shared Receive Queue (SRQ)
2868  *    Context: Can be called from interrupt or base context.
2869  */
2870 /* ARGSUSED */
2871 static ibt_status_t
2872 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2873     ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2874 {
2875 	tavor_state_t	*state;
2876 	tavor_srqhdl_t	srqhdl;
2877 	uint_t		resize_supported, cur_srq_size;
2878 	int		status;
2879 
2880 	TAVOR_TNF_ENTER(tavor_ci_modify_srq);
2881 
2882 	/* Check for valid HCA handle */
2883 	if (hca == NULL) {
2884 		TNF_PROBE_0(tavor_ci_modify_srq_invhca_fail,
2885 		    TAVOR_TNF_ERROR, "");
2886 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2887 		return (IBT_HCA_HDL_INVALID);
2888 	}
2889 
2890 	state = (tavor_state_t *)hca;
2891 
2892 	/* Check if SRQ is even supported */
2893 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
2894 		TNF_PROBE_0(tavor_ci_modify_srq_not_supported_fail,
2895 		    TAVOR_TNF_ERROR, "");
2896 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2897 		return (IBT_NOT_SUPPORTED);
2898 	}
2899 
2900 	/* Check for valid SRQ handle pointer */
2901 	if (srq == NULL) {
2902 		TNF_PROBE_0(tavor_ci_modify_srq_invsrqhdl_fail,
2903 		    TAVOR_TNF_ERROR, "");
2904 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2905 		return (IBT_SRQ_HDL_INVALID);
2906 	}
2907 
2908 	srqhdl = (tavor_srqhdl_t)srq;
2909 
2910 	/*
2911 	 * Check Error State of SRQ.
2912 	 * Also, while we are holding the lock we save away the current SRQ
2913 	 * size for later use.
2914 	 */
2915 	mutex_enter(&srqhdl->srq_lock);
2916 	cur_srq_size = srqhdl->srq_wq_bufsz;
2917 	if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2918 		mutex_exit(&srqhdl->srq_lock);
2919 		TNF_PROBE_0(tavor_ci_modify_srq_error_state,
2920 		    TAVOR_TNF_ERROR, "");
2921 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2922 		return (IBT_SRQ_ERROR_STATE);
2923 	}
2924 	mutex_exit(&srqhdl->srq_lock);
2925 
2926 	/*
2927 	 * Setting the limit watermark is not currently supported.  This is a
2928 	 * tavor hardware (firmware) limitation.  We return NOT_SUPPORTED here,
2929 	 * and have the limit code commented out for now.
2930 	 *
2931 	 * XXX If we enable the limit watermark support, we need to do checks
2932 	 * and set the 'srq->srq_wr_limit' here, instead of returning not
2933 	 * supported.  The 'tavor_srq_modify' operation below is for resizing
2934 	 * the SRQ only, the limit work should be done here.  If this is
2935 	 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2936 	 * function should also be removed at that time.
2937 	 */
2938 	if (flags & IBT_SRQ_SET_LIMIT) {
2939 		TNF_PROBE_0(tavor_ci_modify_srq_limit_not_supported,
2940 		    TAVOR_TNF_ERROR, "");
2941 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2942 		return (IBT_NOT_SUPPORTED);
2943 	}
2944 
2945 	/*
2946 	 * Check the SET_SIZE flag.  If not set, we simply return success here.
2947 	 * However if it is set, we check if resize is supported and only then
2948 	 * do we continue on with our resize processing.
2949 	 */
2950 	if (!(flags & IBT_SRQ_SET_SIZE)) {
2951 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2952 		return (IBT_SUCCESS);
2953 	}
2954 
2955 	resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags &
2956 	    IBT_HCA_RESIZE_SRQ;
2957 
2958 	if ((flags & IBT_SRQ_SET_SIZE) && !resize_supported) {
2959 		TNF_PROBE_0(tavor_ci_modify_srq_resize_not_supp_fail,
2960 		    TAVOR_TNF_ERROR, "");
2961 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2962 		return (IBT_NOT_SUPPORTED);
2963 	}
2964 
2965 	/*
2966 	 * We do not support resizing an SRQ to be smaller than its current
2967 	 * size.  If a smaller (or equal) size is requested, then we simply
2968 	 * return success, and do nothing.
2969 	 */
2970 	if (size <= cur_srq_size) {
2971 		*ret_size_p = cur_srq_size;
2972 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2973 		return (IBT_SUCCESS);
2974 	}
2975 
2976 	status = tavor_srq_modify(state, srqhdl, size, ret_size_p,
2977 	    TAVOR_NOSLEEP);
2978 	if (status != DDI_SUCCESS) {
2979 		/* Set return value to current SRQ size */
2980 		*ret_size_p = cur_srq_size;
2981 		TNF_PROBE_1(tavor_ci_modify_srq_fail, TAVOR_TNF_ERROR, "",
2982 		    tnf_uint, status, status);
2983 		TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2984 		return (status);
2985 	}
2986 
2987 	TAVOR_TNF_EXIT(tavor_ci_modify_srq);
2988 	return (IBT_SUCCESS);
2989 }
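
/*
 * Worked example of the resize semantics above (numbers illustrative):
 * with a current SRQ size of 256 WQEs, IBT_SRQ_SET_SIZE with size = 128
 * returns IBT_SUCCESS and *ret_size_p = 256 (shrink requests are
 * silently ignored), while size = 512 goes through tavor_srq_modify()
 * and returns the actual, possibly rounded-up, new size in *ret_size_p.
 */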
2990 
2991 /*
2992  * tavor_ci_post_srq()
2993  *    Post a Work Request to the specified Shared Receive Queue (SRQ)
2994  *    Context: Can be called from interrupt or base context.
2995  */
2996 static ibt_status_t
2997 tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2998     ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2999 {
3000 	tavor_state_t	*state;
3001 	tavor_srqhdl_t	srqhdl;
3002 	int		status;
3003 
3004 	TAVOR_TNF_ENTER(tavor_ci_post_srq);
3005 
3006 	/* Check for valid HCA handle */
3007 	if (hca == NULL) {
3008 		TNF_PROBE_0(tavor_ci_post_srq_invhca_fail,
3009 		    TAVOR_TNF_ERROR, "");
3010 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
3011 		return (IBT_HCA_HDL_INVALID);
3012 	}
3013 
3014 	state = (tavor_state_t *)hca;
3015 
3016 	/* Check if SRQ is even supported */
3017 	if (state->ts_cfg_profile->cp_srq_enable == 0) {
3018 		TNF_PROBE_0(tavor_ci_post_srq_not_supported_fail,
3019 		    TAVOR_TNF_ERROR, "");
3020 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
3021 		return (IBT_NOT_SUPPORTED);
3022 	}
3023 
3024 	/* Check for valid SRQ handle pointer */
3025 	if (srq == NULL) {
3026 		TNF_PROBE_0(tavor_ci_post_srq_invsrqhdl_fail,
3027 		    TAVOR_TNF_ERROR, "");
3028 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
3029 		return (IBT_SRQ_HDL_INVALID);
3030 	}
3031 
3032 	srqhdl = (tavor_srqhdl_t)srq;
3033 
3034 	status = tavor_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
3035 	if (status != DDI_SUCCESS) {
3036 		TNF_PROBE_1(tavor_ci_post_srq_fail, TAVOR_TNF_ERROR, "",
3037 		    tnf_uint, status, status);
3038 		TAVOR_TNF_EXIT(tavor_ci_post_srq);
3039 		return (status);
3040 	}
3041 
3042 	TAVOR_TNF_EXIT(tavor_ci_post_srq);
3043 	return (IBT_SUCCESS);
3044 }
3045 
3046 /* Address translation */
3047 /*
3048  * tavor_ci_map_mem_area()
3049  *    Context: Can be called from interrupt or base context.
3050  */
3051 /* ARGSUSED */
3052 static ibt_status_t
3053 tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
3054     void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
3055     ibc_ma_hdl_t *ibc_ma_hdl_p)
3056 {
3057 	return (IBT_NOT_SUPPORTED);
3058 }
3059 
3060 /*
3061  * tavor_ci_unmap_mem_area()
3062  * Unmap the memory area
3063  *    Context: Can be called from interrupt or base context.
3064  */
3065 /* ARGSUSED */
3066 static ibt_status_t
3067 tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
3068 {
3069 	return (IBT_NOT_SUPPORTED);
3070 }
3071 
3072 struct ibc_mi_s {
3073 	int			imh_len;
3074 	ddi_dma_handle_t	imh_dmahandle[1];
3075 };
3076 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
3077     ibc_mi_s::imh_len
3078     ibc_mi_s::imh_dmahandle))
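
/*
 * The one-element imh_dmahandle[] array is the C89 "struct hack":
 * storage for "n" handles (n being a placeholder here) is obtained by
 * over-allocating the structure, e.g.
 *
 *	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
 *	    (n - 1) * sizeof (ddi_dma_handle_t), kmflag);
 *	mi_hdl->imh_len = n;
 *
 * and every kmem_free() of the structure must pass the same
 * imh_len-derived size back, as tavor_ci_unmap_mem_iov() does below.
 */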
3079 
3080 
3081 /*
3082  * tavor_ci_map_mem_iov()
3083  * Map the memory
3084  *    Context: Can be called from interrupt or base context.
3085  */
3086 /* ARGSUSED */
3087 static ibt_status_t
3088 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
3089     ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
3090 {
3091 	int			status;
3092 	int			i, j, nds, max_nds;
3093 	uint_t			len;
3094 	ibt_status_t		ibt_status;
3095 	ddi_dma_handle_t	dmahdl;
3096 	ddi_dma_cookie_t	dmacookie;
3097 	ddi_dma_attr_t		dma_attr;
3098 	uint_t			cookie_cnt;
3099 	ibc_mi_hdl_t		mi_hdl;
3100 	ibt_lkey_t		rsvd_lkey;
3101 	ibt_wr_ds_t		*sgl;
3102 	tavor_state_t		*state;
3103 	int			kmflag;
3104 	int			(*callback)(caddr_t);
3105 
3106 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
3107 
3108 	if (mi_hdl_p == NULL)
3109 		return (IBT_MI_HDL_INVALID);
3110 
3111 	/* Check for valid HCA handle */
3112 	if (hca == NULL)
3113 		return (IBT_HCA_HDL_INVALID);
3114 
3115 	/* Tavor does not allow the default "use reserved lkey" */
3116 	if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0)
3117 		return (IBT_INVALID_PARAM);
3118 
3119 	rsvd_lkey = iov_attr->iov_alt_lkey;
3120 
3121 	state = (tavor_state_t *)hca;
3122 	tavor_dma_attr_init(&dma_attr);
3123 #ifdef	__sparc
3124 	if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS)
3125 		dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3126 #endif
3127 
3128 	nds = 0;
3129 	max_nds = iov_attr->iov_wr_nds;
3130 	if (iov_attr->iov_lso_hdr_sz)
3131 		max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
3132 		    0xf) >> 4;	/* 0xf is for rounding up to a multiple of 16 */
3133 	if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
3134 		kmflag = KM_SLEEP;
3135 		callback = DDI_DMA_SLEEP;
3136 	} else {
3137 		kmflag = KM_NOSLEEP;
3138 		callback = DDI_DMA_DONTWAIT;
3139 	}
3140 
3141 	if (iov_attr->iov_flags & IBT_IOV_BUF) {
3142 		mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
3143 		if (mi_hdl == NULL)
3144 			return (IBT_INSUFF_RESOURCE);
3145 		sgl = wr->send.wr_sgl;
3146 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3147 
3148 		status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3149 		    callback, NULL, &dmahdl);
3150 		if (status != DDI_SUCCESS) {
3151 			kmem_free(mi_hdl, sizeof (*mi_hdl));
3152 			return (IBT_INSUFF_RESOURCE);
3153 		}
3154 		status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
3155 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3156 		    &dmacookie, &cookie_cnt);
3157 		if (status != DDI_DMA_MAPPED) {
3158 			ddi_dma_free_handle(&dmahdl);
3159 			kmem_free(mi_hdl, sizeof (*mi_hdl));
3160 			return (ibc_get_ci_failure(0));
3161 		}
3162 		while (cookie_cnt-- > 0) {
3163 			if (nds >= max_nds) {
3164 				(void) ddi_dma_unbind_handle(dmahdl);
3165 				ddi_dma_free_handle(&dmahdl);
				/* also free the container to avoid a leak */
				kmem_free(mi_hdl, sizeof (*mi_hdl));
3166 				return (IBT_SGL_TOO_SMALL);
3167 			}
3168 			sgl[nds].ds_va = dmacookie.dmac_laddress;
3169 			sgl[nds].ds_key = rsvd_lkey;
3170 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3171 			nds++;
3172 			if (cookie_cnt != 0)
3173 				ddi_dma_nextcookie(dmahdl, &dmacookie);
3174 		}
3175 		wr->send.wr_nds = nds;
3176 		mi_hdl->imh_len = 1;
3177 		mi_hdl->imh_dmahandle[0] = dmahdl;
3178 		*mi_hdl_p = mi_hdl;
3179 		return (IBT_SUCCESS);
3180 	}
3181 
3182 	if (iov_attr->iov_flags & IBT_IOV_RECV)
3183 		sgl = wr->recv.wr_sgl;
3184 	else
3185 		sgl = wr->send.wr_sgl;
3186 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
3187 
3188 	len = iov_attr->iov_list_len;
3189 	for (i = 0, j = 0; j < len; j++) {
3190 		if (iov_attr->iov[j].iov_len == 0)
3191 			continue;
3192 		i++;
3193 	}
3194 	mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
3195 	    (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
3196 	if (mi_hdl == NULL)
3197 		return (IBT_INSUFF_RESOURCE);
3198 	mi_hdl->imh_len = i;
3199 	for (i = 0, j = 0; j < len; j++) {
3200 		if (iov_attr->iov[j].iov_len == 0)
3201 			continue;
3202 		status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
3203 		    callback, NULL, &dmahdl);
3204 		if (status != DDI_SUCCESS) {
3205 			ibt_status = IBT_INSUFF_RESOURCE;
3206 			goto fail2;
3207 		}
3208 		status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
3209 		    iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
3210 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
3211 		    &dmacookie, &cookie_cnt);
3212 		if (status != DDI_DMA_MAPPED) {
3213 			ibt_status = ibc_get_ci_failure(0);
3214 			goto fail1;
3215 		}
3216 		if (nds + cookie_cnt > max_nds) {
3217 			ibt_status = IBT_SGL_TOO_SMALL;
			/* unbind the handle bound just above, then unwind */
			(void) ddi_dma_unbind_handle(dmahdl);
3218 			goto fail1;
3219 		}
3220 		while (cookie_cnt-- > 0) {
3221 			sgl[nds].ds_va = dmacookie.dmac_laddress;
3222 			sgl[nds].ds_key = rsvd_lkey;
3223 			sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
3224 			nds++;
3225 			if (cookie_cnt != 0)
3226 				ddi_dma_nextcookie(dmahdl, &dmacookie);
3227 		}
3228 		mi_hdl->imh_dmahandle[i] = dmahdl;
3229 		i++;
3230 	}
3231 
3232 	if (iov_attr->iov_flags & IBT_IOV_RECV)
3233 		wr->recv.wr_nds = nds;
3234 	else
3235 		wr->send.wr_nds = nds;
3236 	*mi_hdl_p = mi_hdl;
3237 	return (IBT_SUCCESS);
3238 
3239 fail1:
3240 	ddi_dma_free_handle(&dmahdl);
3241 fail2:
3242 	while (--i >= 0) {
3243 		(void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
3244 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
3245 	}
3246 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
3247 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
3248 	*mi_hdl_p = NULL;
3249 	return (ibt_status);
3250 }
3251 
3252 /*
3253  * tavor_ci_unmap_mem_iov()
3254  * Unmap the memory
3255  *    Context: Can be called from interrupt or base context.
3256  */
3257 /* ARGSUSED */
3258 static ibt_status_t
3259 tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
3260 {
3261 	int		i;
3262 
3263 	/* Check for valid HCA handle */
3264 	if (hca == NULL)
3265 		return (IBT_HCA_HDL_INVALID);
3266 
3267 	if (mi_hdl == NULL)
3268 		return (IBT_MI_HDL_INVALID);
3269 
3270 	for (i = 0; i < mi_hdl->imh_len; i++) {
3271 		(void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
3272 		ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
3273 	}
3274 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
3275 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
3276 	return (IBT_SUCCESS);
3277 }
3278 
3279 /* Allocate L_Key */
3280 /*
3281  * tavor_ci_alloc_lkey()
3282  */
3283 /* ARGSUSED */
3284 static ibt_status_t
3285 tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
3286     ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p,
3287     ibt_pmr_desc_t *mem_desc_p)
3288 {
3289 	TAVOR_TNF_ENTER(tavor_ci_alloc_lkey);
3290 	TAVOR_TNF_EXIT(tavor_ci_alloc_lkey);
3291 	return (IBT_NOT_SUPPORTED);
3292 }
3293 
3294 /* Physical Register Memory Region */
3295 /*
3296  * tavor_ci_register_physical_mr()
3297  */
3298 /* ARGSUSED */
3299 static ibt_status_t
3300 tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
3301     ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
3302     ibt_pmr_desc_t *mem_desc_p)
3303 {
3304 	TAVOR_TNF_ENTER(tavor_ci_register_physical_mr);
3305 	TAVOR_TNF_EXIT(tavor_ci_register_physical_mr);
3306 	return (IBT_NOT_SUPPORTED);
3307 }
3308 
3309 /*
3310  * tavor_ci_reregister_physical_mr()
3311  */
3312 /* ARGSUSED */
3313 static ibt_status_t
3314 tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
3315     ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
3316     ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
3317 {
3318 	TAVOR_TNF_ENTER(tavor_ci_reregister_physical_mr);
3319 	TAVOR_TNF_EXIT(tavor_ci_reregister_physical_mr);
3320 	return (IBT_NOT_SUPPORTED);
3321 }
3322 
3323 /* Mellanox FMR Support */
3324 /*
3325  * tavor_ci_create_fmr_pool()
3326  * Creates a pool of memory regions suitable for FMR registration
3327  *    Context: Can be called from base context only
3328  */
3329 /* ARGSUSED */
3330 static ibt_status_t
3331 tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
3332     ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
3333 {
3334 	return (IBT_NOT_SUPPORTED);
3335 }
3336 
3337 /*
3338  * tavor_ci_destroy_fmr_pool()
3339  * Free all resources associated with an FMR pool.
3340  *    Context: Can be called from base context only.
3341  */
3342 /* ARGSUSED */
3343 static ibt_status_t
3344 tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3345 {
3346 	return (IBT_NOT_SUPPORTED);
3347 }
3348 
3349 /*
3350  * tavor_ci_flush_fmr_pool()
3351  * Force a flush of the memory tables, cleaning up used FMR resources.
3352  *    Context: Can be called from interrupt or base context.
3353  */
3354 /* ARGSUSED */
3355 static ibt_status_t
3356 tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
3357 {
3358 	return (IBT_NOT_SUPPORTED);
3359 }
3360 
3361 /*
3362  * tavor_ci_register_physical_fmr()
3363  * From the 'pool' of FMR regions passed in, performs register physical
3364  * operation.
3365  *    Context: Can be called from interrupt or base context.
3366  */
3367 /* ARGSUSED */
3368 static ibt_status_t
3369 tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
3370     ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
3371     void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
3372 {
3373 	return (IBT_NOT_SUPPORTED);
3374 }
3375 
3376 /*
3377  * tavor_ci_deregister_fmr()
3378  * Moves an FMR (specified by 'mr') to the deregistered state.
3379  *    Context: Can be called from base context only.
3380  */
3381 /* ARGSUSED */
3382 static ibt_status_t
3383 tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
3384 {
3385 	return (IBT_NOT_SUPPORTED);
3386 }
3387 
3388 /*
3389  * tavor_ci_alloc_io_mem()
3390  *     Allocate DMA-able memory
3391  *
3392  */
3393 ibt_status_t
3394 tavor_ci_alloc_io_mem(
3395 	ibc_hca_hdl_t hca,
3396 	size_t size,
3397 	ibt_mr_flags_t mr_flag,
3398 	caddr_t *kaddrp,
3399 	ibc_mem_alloc_hdl_t *mem_alloc_hdl)
3400 {
3401 	tavor_state_t	*state;
3402 	int		status;
3403 
3404 	TAVOR_TNF_ENTER(tavor_ci_alloc_io_mem);
3405 
3406 	/* Check for valid HCA handle */
3407 	if (hca == NULL) {
3408 		TNF_PROBE_0(tavor_ci_alloc_io_mem_invhca_fail,
3409 		    TAVOR_TNF_ERROR, "");
3410 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3411 		return (IBT_HCA_HDL_INVALID);
3412 	}
3413 
3414 	/* Check for valid mem_alloc_hdl handle pointer */
3415 	if (mem_alloc_hdl == NULL) {
3416 		TNF_PROBE_0(tavor_ci_alloc_io_mem_hdl_fail,
3417 		    TAVOR_TNF_ERROR, "");
3418 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3419 		return (IBT_MEM_ALLOC_HDL_INVALID);
3420 	}
3421 
3422 	/* Grab the Tavor softstate pointer */
3423 	state = (tavor_state_t *)hca;
3424 
3425 	/* Allocate the DMA-able memory */
3426 	status = tavor_mem_alloc(state, size, mr_flag, kaddrp,
3427 	    (tavor_mem_alloc_hdl_t *)mem_alloc_hdl);
3428 
3429 	if (status != DDI_SUCCESS) {
3430 		TNF_PROBE_1(tavor_ci_alloc_io_mem_fail, TAVOR_TNF_ERROR, "",
3431 		    tnf_uint, status, status);
3432 		TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3433 		return (IBT_INSUFF_RESOURCE);
3434 	}
3435 
3436 	TAVOR_TNF_EXIT(tavor_ci_alloc_io_mem);
3437 	return (IBT_SUCCESS);
3438 }
3439 
3440 
3441 /*
3442  * tavor_ci_free_io_mem()
3443  *    Free the DMA-able memory allocated by tavor_ci_alloc_io_mem()
3444  */
3445 ibt_status_t
3446 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
3447 {
3448 	tavor_mem_alloc_hdl_t	memhdl;
3449 
3450 	TAVOR_TNF_ENTER(tavor_ci_free_io_mem);
3451 
3452 	/* Check for valid HCA handle */
3453 	if (hca == NULL) {
3454 		TNF_PROBE_0(tavor_ci_free_io_mem_invhca_fail,
3455 		    TAVOR_TNF_ERROR, "");
3456 		TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3457 		return (IBT_HCA_HDL_INVALID);
3458 	}
3459 
3460 	/* Check for valid mem_alloc_hdl handle pointer */
3461 	if (mem_alloc_hdl == NULL) {
3462 		TNF_PROBE_0(tavor_ci_free_io_mem_hdl_fail,
3463 		    TAVOR_TNF_ERROR, "");
3464 		TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3465 		return (IBT_MEM_ALLOC_HDL_INVALID);
3466 	}
3467 
3468 	memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
3469 
3470 	/* Free the DMA memory and the DMA handle */
3471 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl))
3472 	ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
3473 	ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
3474 
3475 	kmem_free(memhdl, sizeof (*memhdl));
3476 	TAVOR_TNF_EXIT(tavor_ci_free_io_mem);
3477 	return (IBT_SUCCESS);
3478 }
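
/*
 * Usage sketch (editorial, not from the original source): one way a
 * caller might pair the two routines above.  The 'hca' handle and the
 * 1024-byte length are hypothetical placeholders.
 *
 *	caddr_t			kaddr;
 *	ibc_mem_alloc_hdl_t	mem_hdl;
 *
 *	if (tavor_ci_alloc_io_mem(hca, 1024, IBT_MR_NOSLEEP,
 *	    &kaddr, &mem_hdl) == IBT_SUCCESS) {
 *		... DMA into and out of kaddr ...
 *		(void) tavor_ci_free_io_mem(hca, mem_hdl);
 *	}
 */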
3479 
3480 
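/*
 * tavor_mem_alloc()
 *    Worker for tavor_ci_alloc_io_mem(): allocates a DMA handle plus a
 *    piece of DMA-able memory and packages both into a freshly
 *    allocated tavor_mem_alloc_hdl_t.  Honors IBT_MR_NOSLEEP in
 *    'flags' when choosing between sleeping and non-sleeping
 *    allocations.
 *    Context: Can be called from base context; interrupt context is
 *    safe only when IBT_MR_NOSLEEP is set.
 */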
3481 int
3482 tavor_mem_alloc(
3483 	tavor_state_t *state,
3484 	size_t size,
3485 	ibt_mr_flags_t flags,
3486 	caddr_t *kaddrp,
3487 	tavor_mem_alloc_hdl_t *mem_hdl)
3488 {
3489 	ddi_dma_handle_t	dma_hdl;
3490 	ddi_dma_attr_t		dma_attr;
3491 	ddi_acc_handle_t	acc_hdl;
3492 	size_t			real_len;
3493 	int			status;
3494 	int			(*ddi_cb)(caddr_t);
3495 
3496 	TAVOR_TNF_ENTER(tavor_mem_alloc);
3497 
3498 	tavor_dma_attr_init(&dma_attr);
3499 
3500 	ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3501 
3502 	/* Allocate a DMA handle */
3503 	status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb,
3504 	    NULL, &dma_hdl);
3505 	if (status != DDI_SUCCESS) {
3506 		TNF_PROBE_0(tavor_dma_alloc_handle_fail, TAVOR_TNF_ERROR, "");
3507 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3508 		return (DDI_FAILURE);
3509 	}
3510 
3511 	/* Allocate DMA memory */
3512 	status = ddi_dma_mem_alloc(dma_hdl, size,
3513 	    &state->ts_reg_accattr, DDI_DMA_CONSISTENT,
3514 	    ddi_cb, NULL,
3515 	    kaddrp, &real_len, &acc_hdl);
3516 	if (status != DDI_SUCCESS) {
3517 		ddi_dma_free_handle(&dma_hdl);
3518 		TNF_PROBE_0(tavor_dma_alloc_memory_fail, TAVOR_TNF_ERROR, "");
3519 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3520 		return (DDI_FAILURE);
3521 	}
3522 
3523 	/* Package the DMA handles into the mem handle and return */
3524 	*mem_hdl = kmem_alloc(sizeof (**mem_hdl),
3525 	    flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
3526 	if (*mem_hdl == NULL) {
3527 		ddi_dma_mem_free(&acc_hdl);
3528 		ddi_dma_free_handle(&dma_hdl);
3529 		TNF_PROBE_0(tavor_dma_alloc_kmem_fail, TAVOR_TNF_ERROR, "");
3530 		TAVOR_TNF_EXIT(tavor_mem_alloc);
3531 		return (DDI_FAILURE);
3532 	}
3533 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(**mem_hdl))
3534 	(*mem_hdl)->tavor_dma_hdl = dma_hdl;
3535 	(*mem_hdl)->tavor_acc_hdl = acc_hdl;
3536 
3537 	TAVOR_TNF_EXIT(tavor_mem_alloc);
3538 	return (DDI_SUCCESS);
3539 }
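
/*
 * For reference, a sketch (assumed, not authoritative) of the handle
 * packaged above, as implied by the tavor_dma_hdl and tavor_acc_hdl
 * accesses in this file; the real definition lives in the Tavor
 * headers:
 *
 *	typedef struct tavor_mem_alloc_hdl_s {
 *		ddi_dma_handle_t	tavor_dma_hdl;
 *		ddi_acc_handle_t	tavor_acc_hdl;
 *	} *tavor_mem_alloc_hdl_t;
 */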
3540