1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * tavor_ci.c
28 * Tavor Channel Interface (CI) Routines
29 *
30 * Implements all the routines necessary to interface with the IBTF.
31 * Pointers to all of these functions are passed to the IBTF at attach()
32 * time in the ibc_operations_t structure. These functions include all
33 * of the necessary routines to implement the required InfiniBand "verbs"
34 * and additional IBTF-specific interfaces.
35 */
36
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41
42 #include <sys/ib/adapters/tavor/tavor.h>
43
44 /* HCA and port related operations */
45 static ibt_status_t tavor_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
46 ibt_hca_portinfo_t *);
47 static ibt_status_t tavor_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
48 ibt_port_modify_flags_t, uint8_t);
49 static ibt_status_t tavor_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
50
51 /* Protection Domains */
52 static ibt_status_t tavor_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
53 ibc_pd_hdl_t *);
54 static ibt_status_t tavor_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
55
56 /* Reliable Datagram Domains */
57 static ibt_status_t tavor_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
58 ibc_rdd_hdl_t *);
59 static ibt_status_t tavor_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
60
61 /* Address Handles */
62 static ibt_status_t tavor_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
63 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
64 static ibt_status_t tavor_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
65 static ibt_status_t tavor_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
66 ibc_pd_hdl_t *, ibt_adds_vect_t *);
67 static ibt_status_t tavor_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
68 ibt_adds_vect_t *);
69
70 /* Queue Pairs */
71 static ibt_status_t tavor_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
72 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
73 ibc_qp_hdl_t *);
74 static ibt_status_t tavor_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
75 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
76 ibt_chan_sizes_t *, ibc_qp_hdl_t *);
77 static ibt_status_t tavor_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
78 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
79 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
80 static ibt_status_t tavor_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
81 ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
82 static ibt_status_t tavor_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
83 static ibt_status_t tavor_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
84 ibt_qp_query_attr_t *);
85 static ibt_status_t tavor_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
86 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
87
88 /* Completion Queues */
89 static ibt_status_t tavor_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
90 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
91 static ibt_status_t tavor_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
92 static ibt_status_t tavor_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t, uint_t *,
93 uint_t *, uint_t *, ibt_cq_handler_id_t *);
94 static ibt_status_t tavor_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
95 uint_t, uint_t *);
96 static ibt_status_t tavor_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
97 uint_t, uint_t, ibt_cq_handler_id_t);
98 static ibt_status_t tavor_ci_alloc_cq_sched(ibc_hca_hdl_t,
99 ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
100 static ibt_status_t tavor_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
101
102 /* EE Contexts */
103 static ibt_status_t tavor_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
104 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
105 static ibt_status_t tavor_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
106 static ibt_status_t tavor_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
107 ibt_eec_query_attr_t *);
108 static ibt_status_t tavor_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
109 ibt_cep_modify_flags_t, ibt_eec_info_t *);
110
111 /* Memory Registration */
112 static ibt_status_t tavor_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
113 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
114 static ibt_status_t tavor_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
115 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
116 static ibt_status_t tavor_ci_register_shared_mr(ibc_hca_hdl_t,
117 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
118 ibc_mr_hdl_t *, ibt_mr_desc_t *);
119 static ibt_status_t tavor_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
120 static ibt_status_t tavor_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
121 ibt_mr_query_attr_t *);
122 static ibt_status_t tavor_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
123 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
124 ibt_mr_desc_t *);
125 static ibt_status_t tavor_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
126 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
127 ibt_mr_desc_t *);
128 static ibt_status_t tavor_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
129 static ibt_status_t tavor_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
130 ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
131
132 /* Memory Windows */
133 static ibt_status_t tavor_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
134 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
135 static ibt_status_t tavor_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
136 static ibt_status_t tavor_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
137 ibt_mw_query_attr_t *);
138
139 /* Multicast Groups */
140 static ibt_status_t tavor_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
141 ib_gid_t, ib_lid_t);
142 static ibt_status_t tavor_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
143 ib_gid_t, ib_lid_t);
144
145 /* Work Request and Completion Processing */
146 static ibt_status_t tavor_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
147 ibt_send_wr_t *, uint_t, uint_t *);
148 static ibt_status_t tavor_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
149 ibt_recv_wr_t *, uint_t, uint_t *);
150 static ibt_status_t tavor_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
151 ibt_wc_t *, uint_t, uint_t *);
152 static ibt_status_t tavor_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
153 ibt_cq_notify_flags_t);
154
155 /* CI Object Private Data */
156 static ibt_status_t tavor_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
157 ibt_object_type_t, void *, void *, size_t);
158
159 /* CI Object Private Data */
160 static ibt_status_t tavor_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
161 ibt_object_type_t, void *, void *, size_t);
162
163 /* Shared Receive Queues */
164 static ibt_status_t tavor_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
165 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
166 ibt_srq_sizes_t *);
167 static ibt_status_t tavor_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
168 static ibt_status_t tavor_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
169 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
170 static ibt_status_t tavor_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
171 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
172 static ibt_status_t tavor_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
173 ibt_recv_wr_t *, uint_t, uint_t *);
174
175 /* Address translation */
176 static ibt_status_t tavor_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
177 void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
178 static ibt_status_t tavor_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
179 static ibt_status_t tavor_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
180 ibt_all_wr_t *, ibc_mi_hdl_t *);
181 static ibt_status_t tavor_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
182
183 /* Allocate L_Key */
184 static ibt_status_t tavor_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
185 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
186
187 /* Physical Register Memory Region */
188 static ibt_status_t tavor_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
189 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
190 static ibt_status_t tavor_ci_reregister_physical_mr(ibc_hca_hdl_t,
191 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
192 ibt_pmr_desc_t *);
193
194 /* Mellanox FMR */
195 static ibt_status_t tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
196 ibt_fmr_pool_attr_t *fmr_params, ibc_fmr_pool_hdl_t *fmr_pool);
197 static ibt_status_t tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
198 ibc_fmr_pool_hdl_t fmr_pool);
199 static ibt_status_t tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
200 ibc_fmr_pool_hdl_t fmr_pool);
201 static ibt_status_t tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
202 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
203 void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
204 static ibt_status_t tavor_ci_deregister_fmr(ibc_hca_hdl_t hca,
205 ibc_mr_hdl_t mr);
206
207 static ibt_status_t tavor_ci_alloc_io_mem(ibc_hca_hdl_t, size_t,
208 ibt_mr_flags_t, caddr_t *, ibc_mem_alloc_hdl_t *);
209 static ibt_status_t tavor_ci_free_io_mem(ibc_hca_hdl_t, ibc_mem_alloc_hdl_t);
210 static int tavor_mem_alloc(tavor_state_t *, size_t, ibt_mr_flags_t,
211 caddr_t *, tavor_mem_alloc_hdl_t *);
212
213 static ibt_status_t tavor_ci_not_supported();
214
215 /*
216 * This ibc_operations_t structure includes pointers to all the entry points
217 * provided by the Tavor driver. This structure is passed to the IBTF at
218  * driver attach time, using the ibc_attach() call (sketched below).
219 */
220 ibc_operations_t tavor_ibc_ops = {
221 /* HCA and port related operations */
222 tavor_ci_query_hca_ports,
223 tavor_ci_modify_ports,
224 tavor_ci_modify_system_image,
225
226 /* Protection Domains */
227 tavor_ci_alloc_pd,
228 tavor_ci_free_pd,
229
230 /* Reliable Datagram Domains */
231 tavor_ci_alloc_rdd,
232 tavor_ci_free_rdd,
233
234 /* Address Handles */
235 tavor_ci_alloc_ah,
236 tavor_ci_free_ah,
237 tavor_ci_query_ah,
238 tavor_ci_modify_ah,
239
240 /* Queue Pairs */
241 tavor_ci_alloc_qp,
242 tavor_ci_alloc_special_qp,
243 tavor_ci_alloc_qp_range,
244 tavor_ci_free_qp,
245 tavor_ci_release_qpn,
246 tavor_ci_query_qp,
247 tavor_ci_modify_qp,
248
249 /* Completion Queues */
250 tavor_ci_alloc_cq,
251 tavor_ci_free_cq,
252 tavor_ci_query_cq,
253 tavor_ci_resize_cq,
254 tavor_ci_modify_cq,
255 tavor_ci_alloc_cq_sched,
256 tavor_ci_free_cq_sched,
257 tavor_ci_not_supported, /* query_cq_handler_id */
258
259 /* EE Contexts */
260 tavor_ci_alloc_eec,
261 tavor_ci_free_eec,
262 tavor_ci_query_eec,
263 tavor_ci_modify_eec,
264
265 /* Memory Registration */
266 tavor_ci_register_mr,
267 tavor_ci_register_buf,
268 tavor_ci_register_shared_mr,
269 tavor_ci_deregister_mr,
270 tavor_ci_query_mr,
271 tavor_ci_reregister_mr,
272 tavor_ci_reregister_buf,
273 tavor_ci_sync_mr,
274
275 /* Memory Windows */
276 tavor_ci_alloc_mw,
277 tavor_ci_free_mw,
278 tavor_ci_query_mw,
279
280 /* Multicast Groups */
281 tavor_ci_attach_mcg,
282 tavor_ci_detach_mcg,
283
284 /* Work Request and Completion Processing */
285 tavor_ci_post_send,
286 tavor_ci_post_recv,
287 tavor_ci_poll_cq,
288 tavor_ci_notify_cq,
289
290 /* CI Object Mapping Data */
291 tavor_ci_ci_data_in,
292 tavor_ci_ci_data_out,
293
294 /* Shared Receive Queue */
295 tavor_ci_alloc_srq,
296 tavor_ci_free_srq,
297 tavor_ci_query_srq,
298 tavor_ci_modify_srq,
299 tavor_ci_post_srq,
300
301 /* Address translation */
302 tavor_ci_map_mem_area,
303 tavor_ci_unmap_mem_area,
304 tavor_ci_map_mem_iov,
305 tavor_ci_unmap_mem_iov,
306
307 /* Allocate L_key */
308 tavor_ci_alloc_lkey,
309
310 /* Physical Register Memory Region */
311 tavor_ci_register_physical_mr,
312 tavor_ci_reregister_physical_mr,
313
314 /* Mellanox FMR */
315 tavor_ci_create_fmr_pool,
316 tavor_ci_destroy_fmr_pool,
317 tavor_ci_flush_fmr_pool,
318 tavor_ci_register_physical_fmr,
319 tavor_ci_deregister_fmr,
320
321 	/* DMA-able memory */
322 tavor_ci_alloc_io_mem,
323 tavor_ci_free_io_mem,
324
325 /* XRC not yet supported */
326 tavor_ci_not_supported, /* ibc_alloc_xrc_domain */
327 tavor_ci_not_supported, /* ibc_free_xrc_domain */
328 tavor_ci_not_supported, /* ibc_alloc_xrc_srq */
329 tavor_ci_not_supported, /* ibc_free_xrc_srq */
330 tavor_ci_not_supported, /* ibc_query_xrc_srq */
331 tavor_ci_not_supported, /* ibc_modify_xrc_srq */
332 tavor_ci_not_supported, /* ibc_alloc_xrc_tgt_qp */
333 tavor_ci_not_supported, /* ibc_free_xrc_tgt_qp */
334 tavor_ci_not_supported, /* ibc_query_xrc_tgt_qp */
335 tavor_ci_not_supported, /* ibc_modify_xrc_tgt_qp */
336
337 /* Memory Region (physical) */
338 tavor_ci_register_dma_mr,
339
340 /* Next enhancements */
341 tavor_ci_not_supported, /* ibc_enhancement1 */
342 tavor_ci_not_supported, /* ibc_enhancement2 */
343 tavor_ci_not_supported, /* ibc_enhancement3 */
344 tavor_ci_not_supported, /* ibc_enhancement4 */
345 };
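
/*
 * How tavor_ibc_ops reaches the IBTF: a minimal sketch of the attach()
 * hand-off. The ibc_hca_info_t member names, the ts_ibtfinfo and
 * ts_ibtfpriv state fields, and the CI version constant shown are
 * illustrative assumptions; see tavor.c for the authoritative code.
 *
 *	state->ts_ibtfinfo.hca_ci_vers = IBCI_V2;
 *	state->ts_ibtfinfo.hca_handle  = (ibc_hca_hdl_t)state;
 *	state->ts_ibtfinfo.hca_ops     = &tavor_ibc_ops;
 *	(void) ibc_attach(&state->ts_ibtfpriv, &state->ts_ibtfinfo);
 *
 * The hca_handle registered here is what comes back as the "hca"
 * argument to every routine in this file, which is why each routine
 * simply casts it to a tavor_state_t pointer.
 */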
346
347 /*
348  * Fallback for not-yet-implemented ops
349 */
350 /* ARGSUSED */
351 static ibt_status_t
352 tavor_ci_not_supported()
353 {
354 return (IBT_NOT_SUPPORTED);
355 }
356
357
358 /*
359 * tavor_ci_query_hca_ports()
360 * Returns HCA port attributes for either one or all of the HCA's ports.
361 * Context: Can be called only from user or kernel context.
362 */
363 static ibt_status_t
364 tavor_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
365 ibt_hca_portinfo_t *info_p)
366 {
367 tavor_state_t *state;
368 uint_t start, end, port;
369 int status, indx;
370
371 /* Check for valid HCA handle */
372 if (hca == NULL) {
373 return (IBT_HCA_HDL_INVALID);
374 }
375
376 /* Grab the Tavor softstate pointer */
377 state = (tavor_state_t *)hca;
378
379 /*
380 * If the specified port is zero, then we are supposed to query all
381 * ports. Otherwise, we query only the port number specified.
382  * Set up the start and end port numbers as appropriate for the loop
383 * below. Note: The first Tavor port is port number one (1).
384 */
385 if (query_port == 0) {
386 start = 1;
387 end = start + (state->ts_cfg_profile->cp_num_ports - 1);
388 } else {
389 end = start = query_port;
390 }
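	/*
	 * For example, on a two-port HCA (cp_num_ports == 2), a
	 * query_port of zero yields start = 1 and end = 2, so the loop
	 * below fills info_p[0] and info_p[1] with ports 1 and 2.
	 */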
391
392 /* Query the port(s) */
393 for (port = start, indx = 0; port <= end; port++, indx++) {
394 status = tavor_port_query(state, port, &info_p[indx]);
395 if (status != DDI_SUCCESS) {
396 return (status);
397 }
398 }
399
400 return (IBT_SUCCESS);
401 }
402
403
404 /*
405 * tavor_ci_modify_ports()
406 * Modify HCA port attributes
407 * Context: Can be called only from user or kernel context.
408 */
409 static ibt_status_t
410 tavor_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
411 ibt_port_modify_flags_t flags, uint8_t init_type)
412 {
413 tavor_state_t *state;
414 int status;
415
416 /* Check for valid HCA handle */
417 if (hca == NULL) {
418 return (IBT_HCA_HDL_INVALID);
419 }
420
421 /* Grab the Tavor softstate pointer */
422 state = (tavor_state_t *)hca;
423
424 /* Modify the port(s) */
425 status = tavor_port_modify(state, port, flags, init_type);
426 if (status != DDI_SUCCESS) {
427 return (status);
428 }
429
430 return (IBT_SUCCESS);
431 }
432
433 /*
434 * tavor_ci_modify_system_image()
435 * Modify the System Image GUID
436 * Context: Can be called only from user or kernel context.
437 */
438 /* ARGSUSED */
439 static ibt_status_t
440 tavor_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
441 {
442 /*
443 * This is an unsupported interface for the Tavor driver. This
444 * interface is necessary to support modification of the System
445 * Image GUID. Tavor is only capable of modifying this parameter
446 * once (during driver initialization).
447 */
448
449 return (IBT_NOT_SUPPORTED);
450 }
451
452 /*
453 * tavor_ci_alloc_pd()
454 * Allocate a Protection Domain
455 * Context: Can be called only from user or kernel context.
456 */
457 /* ARGSUSED */
458 static ibt_status_t
459 tavor_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
460 {
461 tavor_state_t *state;
462 tavor_pdhdl_t pdhdl;
463 int status;
464
465 ASSERT(pd_p != NULL);
466
467 /* Check for valid HCA handle */
468 if (hca == NULL) {
469 return (IBT_HCA_HDL_INVALID);
470 }
471
472 /* Grab the Tavor softstate pointer */
473 state = (tavor_state_t *)hca;
474
475 /* Allocate the PD */
476 status = tavor_pd_alloc(state, &pdhdl, TAVOR_NOSLEEP);
477 if (status != DDI_SUCCESS) {
478 return (status);
479 }
480
481 /* Return the Tavor PD handle */
482 *pd_p = (ibc_pd_hdl_t)pdhdl;
483
484 return (IBT_SUCCESS);
485 }
486
487
488 /*
489 * tavor_ci_free_pd()
490 * Free a Protection Domain
491  * Context: Can be called only from user or kernel context.
492 */
493 static ibt_status_t
494 tavor_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
495 {
496 tavor_state_t *state;
497 tavor_pdhdl_t pdhdl;
498 int status;
499
500 /* Check for valid HCA handle */
501 if (hca == NULL) {
502 return (IBT_HCA_HDL_INVALID);
503 }
504
505 /* Check for valid PD handle pointer */
506 if (pd == NULL) {
507 return (IBT_PD_HDL_INVALID);
508 }
509
510 /* Grab the Tavor softstate pointer and PD handle */
511 state = (tavor_state_t *)hca;
512 pdhdl = (tavor_pdhdl_t)pd;
513
514 /* Free the PD */
515 status = tavor_pd_free(state, &pdhdl);
516 if (status != DDI_SUCCESS) {
517 return (status);
518 }
519
520 return (IBT_SUCCESS);
521 }
522
523
524 /*
525 * tavor_ci_alloc_rdd()
526 * Allocate a Reliable Datagram Domain
527 * Context: Can be called only from user or kernel context.
528 */
529 /* ARGSUSED */
530 static ibt_status_t
531 tavor_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
532 ibc_rdd_hdl_t *rdd_p)
533 {
534 /*
535 * This is an unsupported interface for the Tavor driver. This
536 * interface is necessary to support Reliable Datagram (RD)
537 * operations. Tavor does not support RD.
538 */
539
540 return (IBT_NOT_SUPPORTED);
541 }
542
543
544 /*
545  * tavor_ci_free_rdd()
546 * Free a Reliable Datagram Domain
547 * Context: Can be called only from user or kernel context.
548 */
549 /* ARGSUSED */
550 static ibt_status_t
551 tavor_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
552 {
553 /*
554 * This is an unsupported interface for the Tavor driver. This
555 * interface is necessary to support Reliable Datagram (RD)
556 * operations. Tavor does not support RD.
557 */
558
559 return (IBT_NOT_SUPPORTED);
560 }
561
562
563 /*
564 * tavor_ci_alloc_ah()
565 * Allocate an Address Handle
566 * Context: Can be called only from user or kernel context.
567 */
568 /* ARGSUSED */
569 static ibt_status_t
570 tavor_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
571 ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
572 {
573 tavor_state_t *state;
574 tavor_ahhdl_t ahhdl;
575 tavor_pdhdl_t pdhdl;
576 int status;
577
578 /* Check for valid HCA handle */
579 if (hca == NULL) {
580 return (IBT_HCA_HDL_INVALID);
581 }
582
583 /* Check for valid PD handle pointer */
584 if (pd == NULL) {
585 return (IBT_PD_HDL_INVALID);
586 }
587
588 /* Grab the Tavor softstate pointer and PD handle */
589 state = (tavor_state_t *)hca;
590 pdhdl = (tavor_pdhdl_t)pd;
591
592 /* Allocate the AH */
593 status = tavor_ah_alloc(state, pdhdl, attr_p, &ahhdl, TAVOR_NOSLEEP);
594 if (status != DDI_SUCCESS) {
595 return (status);
596 }
597
598 /* Return the Tavor AH handle */
599 *ah_p = (ibc_ah_hdl_t)ahhdl;
600
601 return (IBT_SUCCESS);
602 }
603
604
605 /*
606 * tavor_ci_free_ah()
607 * Free an Address Handle
608 * Context: Can be called only from user or kernel context.
609 */
610 static ibt_status_t
611 tavor_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
612 {
613 tavor_state_t *state;
614 tavor_ahhdl_t ahhdl;
615 int status;
616
617 /* Check for valid HCA handle */
618 if (hca == NULL) {
619 return (IBT_HCA_HDL_INVALID);
620 }
621
622 /* Check for valid address handle pointer */
623 if (ah == NULL) {
624 return (IBT_AH_HDL_INVALID);
625 }
626
627 /* Grab the Tavor softstate pointer and AH handle */
628 state = (tavor_state_t *)hca;
629 ahhdl = (tavor_ahhdl_t)ah;
630
631 /* Free the AH */
632 status = tavor_ah_free(state, &ahhdl, TAVOR_NOSLEEP);
633 if (status != DDI_SUCCESS) {
634 return (status);
635 }
636
637 return (IBT_SUCCESS);
638 }
639
640
641 /*
642 * tavor_ci_query_ah()
643 * Return the Address Vector information for a specified Address Handle
644 * Context: Can be called from interrupt or base context.
645 */
646 static ibt_status_t
647 tavor_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
648 ibt_adds_vect_t *attr_p)
649 {
650 tavor_state_t *state;
651 tavor_ahhdl_t ahhdl;
652 tavor_pdhdl_t pdhdl;
653 int status;
654
655 /* Check for valid HCA handle */
656 if (hca == NULL) {
657 return (IBT_HCA_HDL_INVALID);
658 }
659
660 /* Check for valid address handle pointer */
661 if (ah == NULL) {
662 return (IBT_AH_HDL_INVALID);
663 }
664
665 /* Grab the Tavor softstate pointer and AH handle */
666 state = (tavor_state_t *)hca;
667 ahhdl = (tavor_ahhdl_t)ah;
668
669 /* Query the AH */
670 status = tavor_ah_query(state, ahhdl, &pdhdl, attr_p);
671 if (status != DDI_SUCCESS) {
672 return (status);
673 }
674
675 /* Return the Tavor PD handle */
676 *pd_p = (ibc_pd_hdl_t)pdhdl;
677
678 return (IBT_SUCCESS);
679 }
680
681
682 /*
683 * tavor_ci_modify_ah()
684 * Modify the Address Vector information of a specified Address Handle
685 * Context: Can be called from interrupt or base context.
686 */
687 static ibt_status_t
688 tavor_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
689 {
690 tavor_state_t *state;
691 tavor_ahhdl_t ahhdl;
692 int status;
693
694 /* Check for valid HCA handle */
695 if (hca == NULL) {
696 return (IBT_HCA_HDL_INVALID);
697 }
698
699 /* Check for valid address handle pointer */
700 if (ah == NULL) {
701 return (IBT_AH_HDL_INVALID);
702 }
703
704 /* Grab the Tavor softstate pointer and AH handle */
705 state = (tavor_state_t *)hca;
706 ahhdl = (tavor_ahhdl_t)ah;
707
708 /* Modify the AH */
709 status = tavor_ah_modify(state, ahhdl, attr_p);
710 if (status != DDI_SUCCESS) {
711 return (status);
712 }
713
714 return (IBT_SUCCESS);
715 }
716
717
718 /*
719 * tavor_ci_alloc_qp()
720 * Allocate a Queue Pair
721 * Context: Can be called only from user or kernel context.
722 */
723 static ibt_status_t
724 tavor_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
725 ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
726 ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
727 {
728 tavor_state_t *state;
729 tavor_qp_info_t qpinfo;
730 tavor_qp_options_t op;
731 int status;
732
733 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
734 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
735
736 /* Check for valid HCA handle */
737 if (hca == NULL) {
738 return (IBT_HCA_HDL_INVALID);
739 }
740
741 /* Grab the Tavor softstate pointer */
742 state = (tavor_state_t *)hca;
743
744 /* Allocate the QP */
745 qpinfo.qpi_attrp = attr_p;
746 qpinfo.qpi_type = type;
747 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
748 qpinfo.qpi_queueszp = queue_sizes_p;
749 qpinfo.qpi_qpn = qpn;
750 op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
751 status = tavor_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
752 if (status != DDI_SUCCESS) {
753 return (status);
754 }
755
756 /* Return the Tavor QP handle */
757 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
758
759 return (IBT_SUCCESS);
760 }
761
762
763 /*
764 * tavor_ci_alloc_special_qp()
765 * Allocate a Special Queue Pair
766 * Context: Can be called only from user or kernel context.
767 */
768 static ibt_status_t
769 tavor_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
770 ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
771 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
772 ibc_qp_hdl_t *qp_p)
773 {
774 tavor_state_t *state;
775 tavor_qp_info_t qpinfo;
776 tavor_qp_options_t op;
777 int status;
778
779 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
780 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
781
782 /* Check for valid HCA handle */
783 if (hca == NULL) {
784 return (IBT_HCA_HDL_INVALID);
785 }
786
787 /* Grab the Tavor softstate pointer */
788 state = (tavor_state_t *)hca;
789
790 /* Allocate the Special QP */
791 qpinfo.qpi_attrp = attr_p;
792 qpinfo.qpi_type = type;
793 qpinfo.qpi_port = port;
794 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
795 qpinfo.qpi_queueszp = queue_sizes_p;
796 op.qpo_wq_loc = state->ts_cfg_profile->cp_qp_wq_inddr;
797 status = tavor_special_qp_alloc(state, &qpinfo, TAVOR_NOSLEEP, &op);
798 if (status != DDI_SUCCESS) {
799 return (status);
800 }
801
802 /* Return the Tavor QP handle */
803 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
804
805 return (IBT_SUCCESS);
806 }
807
808
809 /* ARGSUSED */
810 static ibt_status_t
811 tavor_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
812 ibtl_qp_hdl_t *ibtl_qp_p, ibt_qp_type_t type,
813 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
814 ibc_cq_hdl_t *send_cq_p, ibc_cq_hdl_t *recv_cq_p,
815 ib_qpn_t *qpn_p, ibc_qp_hdl_t *qp_p)
816 {
817 return (IBT_NOT_SUPPORTED);
818 }
819
820 /*
821 * tavor_ci_free_qp()
822 * Free a Queue Pair
823 * Context: Can be called only from user or kernel context.
824 */
825 static ibt_status_t
826 tavor_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
827 ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
828 {
829 tavor_state_t *state;
830 tavor_qphdl_t qphdl;
831 int status;
832
833 /* Check for valid HCA handle */
834 if (hca == NULL) {
835 return (IBT_HCA_HDL_INVALID);
836 }
837
838 /* Check for valid QP handle pointer */
839 if (qp == NULL) {
840 return (IBT_QP_HDL_INVALID);
841 }
842
843 /* Grab the Tavor softstate pointer and QP handle */
844 state = (tavor_state_t *)hca;
845 qphdl = (tavor_qphdl_t)qp;
846
847 /* Free the QP */
848 status = tavor_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
849 TAVOR_NOSLEEP);
850 if (status != DDI_SUCCESS) {
851 return (status);
852 }
853
854 return (IBT_SUCCESS);
855 }
856
857
858 /*
859 * tavor_ci_release_qpn()
860 * Release a Queue Pair Number (QPN)
861 * Context: Can be called only from user or kernel context.
862 */
863 static ibt_status_t
864 tavor_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
865 {
866 tavor_state_t *state;
867 tavor_qpn_entry_t *entry;
868
869 /* Check for valid HCA handle */
870 if (hca == NULL) {
871 return (IBT_HCA_HDL_INVALID);
872 }
873
874 /* Check for valid QP handle pointer */
875 if (qpnh == NULL) {
876 return (IBT_QP_HDL_INVALID);
877 }
878
879 /* Grab the Tavor softstate pointer and QP handle */
880 state = (tavor_state_t *)hca;
881 entry = (tavor_qpn_entry_t *)qpnh;
882
883 /* Release the QP number */
884 tavor_qp_release_qpn(state, entry, TAVOR_QPN_RELEASE);
885
886 return (IBT_SUCCESS);
887 }
888
889
890 /*
891 * tavor_ci_query_qp()
892 * Query a Queue Pair
893 * Context: Can be called from interrupt or base context.
894 */
895 static ibt_status_t
896 tavor_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
897 ibt_qp_query_attr_t *attr_p)
898 {
899 tavor_state_t *state;
900 tavor_qphdl_t qphdl;
901 int status;
902
903 /* Check for valid HCA handle */
904 if (hca == NULL) {
905 return (IBT_HCA_HDL_INVALID);
906 }
907
908 /* Check for valid QP handle */
909 if (qp == NULL) {
910 return (IBT_QP_HDL_INVALID);
911 }
912
913 /* Grab the Tavor softstate pointer and QP handle */
914 state = (tavor_state_t *)hca;
915 qphdl = (tavor_qphdl_t)qp;
916
917 /* Query the QP */
918 status = tavor_qp_query(state, qphdl, attr_p);
919 if (status != DDI_SUCCESS) {
920 return (status);
921 }
922
923 return (IBT_SUCCESS);
924 }
925
926
927 /*
928 * tavor_ci_modify_qp()
929 * Modify a Queue Pair
930 * Context: Can be called from interrupt or base context.
931 */
932 static ibt_status_t
933 tavor_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
934 ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
935 ibt_queue_sizes_t *actual_sz)
936 {
937 tavor_state_t *state;
938 tavor_qphdl_t qphdl;
939 int status;
940
941 /* Check for valid HCA handle */
942 if (hca == NULL) {
943 return (IBT_HCA_HDL_INVALID);
944 }
945
946 /* Check for valid QP handle */
947 if (qp == NULL) {
948 return (IBT_QP_HDL_INVALID);
949 }
950
951 /* Grab the Tavor softstate pointer and QP handle */
952 state = (tavor_state_t *)hca;
953 qphdl = (tavor_qphdl_t)qp;
954
955 /* Modify the QP */
956 status = tavor_qp_modify(state, qphdl, flags, info_p, actual_sz);
957 if (status != DDI_SUCCESS) {
958 return (status);
959 }
960
961 return (IBT_SUCCESS);
962 }
963
964
965 /*
966 * tavor_ci_alloc_cq()
967 * Allocate a Completion Queue
968 * Context: Can be called only from user or kernel context.
969 */
970 /* ARGSUSED */
971 static ibt_status_t
972 tavor_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
973 ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
974 {
975 tavor_state_t *state;
976 tavor_cqhdl_t cqhdl;
977 int status;
978
979 /* Check for valid HCA handle */
980 if (hca == NULL) {
981 return (IBT_HCA_HDL_INVALID);
982 }
983
984 /* Grab the Tavor softstate pointer */
985 state = (tavor_state_t *)hca;
986
987 /* Allocate the CQ */
988 status = tavor_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
989 &cqhdl, TAVOR_NOSLEEP);
990 if (status != DDI_SUCCESS) {
991 return (status);
992 }
993
994 /* Return the Tavor CQ handle */
995 *cq_p = (ibc_cq_hdl_t)cqhdl;
996
997 return (IBT_SUCCESS);
998 }
999
1000
1001 /*
1002 * tavor_ci_free_cq()
1003 * Free a Completion Queue
1004 * Context: Can be called only from user or kernel context.
1005 */
1006 static ibt_status_t
1007 tavor_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
1008 {
1009 tavor_state_t *state;
1010 tavor_cqhdl_t cqhdl;
1011 int status;
1012
1013 /* Check for valid HCA handle */
1014 if (hca == NULL) {
1015 return (IBT_HCA_HDL_INVALID);
1016 }
1017
1018 /* Check for valid CQ handle pointer */
1019 if (cq == NULL) {
1020 return (IBT_CQ_HDL_INVALID);
1021 }
1022
1023 /* Grab the Tavor softstate pointer and CQ handle */
1024 state = (tavor_state_t *)hca;
1025 cqhdl = (tavor_cqhdl_t)cq;
1026
1027 /* Free the CQ */
1028 status = tavor_cq_free(state, &cqhdl, TAVOR_NOSLEEP);
1029 if (status != DDI_SUCCESS) {
1030 return (status);
1031 }
1032
1033 return (IBT_SUCCESS);
1034 }
1035
1036
1037 /*
1038 * tavor_ci_query_cq()
1039 * Return the size of a Completion Queue
1040 * Context: Can be called only from user or kernel context.
1041 */
1042 static ibt_status_t
1043 tavor_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
1044 uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
1045 {
1046 tavor_cqhdl_t cqhdl;
1047
1048 /* Check for valid HCA handle */
1049 if (hca == NULL) {
1050 return (IBT_HCA_HDL_INVALID);
1051 }
1052
1053 /* Check for valid CQ handle pointer */
1054 if (cq == NULL) {
1055 return (IBT_CQ_HDL_INVALID);
1056 }
1057
1058 /* Grab the CQ handle */
1059 cqhdl = (tavor_cqhdl_t)cq;
1060
1061 /* Query the current CQ size */
1062 *entries_p = cqhdl->cq_bufsz;
1063
1064 /* interrupt moderation is not supported */
1065 *count_p = 0;
1066 *usec_p = 0;
1067 *hid_p = 0;
1068
1069 return (IBT_SUCCESS);
1070 }
1071
1072
1073 /*
1074 * tavor_ci_resize_cq()
1075 * Change the size of a Completion Queue
1076 * Context: Can be called only from user or kernel context.
1077 */
1078 static ibt_status_t
1079 tavor_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
1080 uint_t *actual_size)
1081 {
1082 tavor_state_t *state;
1083 tavor_cqhdl_t cqhdl;
1084 int status;
1085
1086 /* Check for valid HCA handle */
1087 if (hca == NULL) {
1088 return (IBT_HCA_HDL_INVALID);
1089 }
1090
1091 /* Check for valid CQ handle pointer */
1092 if (cq == NULL) {
1093 return (IBT_CQ_HDL_INVALID);
1094 }
1095
1096 /* Grab the Tavor softstate pointer and CQ handle */
1097 state = (tavor_state_t *)hca;
1098 cqhdl = (tavor_cqhdl_t)cq;
1099
1100 /* Resize the CQ */
1101 status = tavor_cq_resize(state, cqhdl, size, actual_size,
1102 TAVOR_NOSLEEP);
1103 if (status != DDI_SUCCESS) {
1104 return (status);
1105 }
1106
1107 return (IBT_SUCCESS);
1108 }
1109
1110 /*
1111 * CQ interrupt moderation is not supported in tavor.
1112 */
1113
1114 /* ARGSUSED */
1115 static ibt_status_t
1116 tavor_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq,
1117 uint_t count, uint_t usec, ibt_cq_handler_id_t hid)
1118 {
1119 return (IBT_NOT_SUPPORTED);
1120 }
1121
1122 /*
1123 * tavor_ci_alloc_cq_sched()
1124 * Reserve a CQ scheduling class resource
1125 * Context: Can be called only from user or kernel context.
1126 */
1127 /* ARGSUSED */
1128 static ibt_status_t
1129 tavor_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
1130 ibc_sched_hdl_t *sched_hdl_p)
1131 {
1132 if (hca == NULL) {
1133 return (IBT_HCA_HDL_INVALID);
1134 }
1135 *sched_hdl_p = NULL;
1136
1137 /*
1138 	 * Tavor does not support CQ scheduling classes; the call succeeds
1139 	 * as a no-op, returning the NULL handle set above.
1140 */
1141 return (IBT_SUCCESS);
1142 }
1143
1144
1145 /*
1146 * tavor_ci_free_cq_sched()
1147 * Free a CQ scheduling class resource
1148 * Context: Can be called only from user or kernel context.
1149 */
1150 /* ARGSUSED */
1151 static ibt_status_t
1152 tavor_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
1153 {
1154 if (hca == NULL) {
1155 return (IBT_HCA_HDL_INVALID);
1156 }
1157
1158 /*
1159 	 * Tavor does not support CQ scheduling classes, so there is
1160 	 * nothing to free; the call succeeds as a no-op.
1161 */
1162 return (IBT_SUCCESS);
1163 }
1164
1165
1166 /*
1167 * tavor_ci_alloc_eec()
1168 * Allocate an End-to-End context
1169 * Context: Can be called only from user or kernel context.
1170 */
1171 /* ARGSUSED */
1172 static ibt_status_t
1173 tavor_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1174 ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1175 {
1176 /*
1177 * This is an unsupported interface for the Tavor driver. This
1178 * interface is necessary to support Reliable Datagram (RD)
1179 * operations. Tavor does not support RD.
1180 */
1181
1182 return (IBT_NOT_SUPPORTED);
1183 }
1184
1185
1186 /*
1187 * tavor_ci_free_eec()
1188 * Free an End-to-End context
1189 * Context: Can be called only from user or kernel context.
1190 */
1191 /* ARGSUSED */
1192 static ibt_status_t
1193 tavor_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
1194 {
1195 /*
1196 * This is an unsupported interface for the Tavor driver. This
1197 * interface is necessary to support Reliable Datagram (RD)
1198 * operations. Tavor does not support RD.
1199 */
1200
1201 return (IBT_NOT_SUPPORTED);
1202 }
1203
1204
1205 /*
1206 * tavor_ci_query_eec()
1207 * Query an End-to-End context
1208 * Context: Can be called from interrupt or base context.
1209 */
1210 /* ARGSUSED */
1211 static ibt_status_t
1212 tavor_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1213 ibt_eec_query_attr_t *attr_p)
1214 {
1215 /*
1216 * This is an unsupported interface for the Tavor driver. This
1217 * interface is necessary to support Reliable Datagram (RD)
1218 * operations. Tavor does not support RD.
1219 */
1220
1221 return (IBT_NOT_SUPPORTED);
1222 }
1223
1224
1225 /*
1226 * tavor_ci_modify_eec()
1227 * Modify an End-to-End context
1228 * Context: Can be called from interrupt or base context.
1229 */
1230 /* ARGSUSED */
1231 static ibt_status_t
1232 tavor_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1233 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1234 {
1235 /*
1236 * This is an unsupported interface for the Tavor driver. This
1237 * interface is necessary to support Reliable Datagram (RD)
1238 * operations. Tavor does not support RD.
1239 */
1240
1241 return (IBT_NOT_SUPPORTED);
1242 }
1243
1244
1245 /*
1246 * tavor_ci_register_mr()
1247 * Prepare a virtually addressed Memory Region for use by an HCA
1248 * Context: Can be called from interrupt or base context.
1249 */
1250 /* ARGSUSED */
1251 static ibt_status_t
1252 tavor_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1253 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1254 ibt_mr_desc_t *mr_desc)
1255 {
1256 tavor_mr_options_t op;
1257 tavor_state_t *state;
1258 tavor_pdhdl_t pdhdl;
1259 tavor_mrhdl_t mrhdl;
1260 int status;
1261
1262 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1263
1264 ASSERT(mr_attr != NULL);
1265 ASSERT(mr_p != NULL);
1266 ASSERT(mr_desc != NULL);
1267
1268 /* Check for valid HCA handle */
1269 if (hca == NULL) {
1270 return (IBT_HCA_HDL_INVALID);
1271 }
1272
1273 /* Check for valid PD handle pointer */
1274 if (pd == NULL) {
1275 return (IBT_PD_HDL_INVALID);
1276 }
1277
1278 /*
1279 * Validate the access flags. Both Remote Write and Remote Atomic
1280 * require the Local Write flag to be set
1281 */
1282 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1283 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1284 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1285 return (IBT_MR_ACCESS_REQ_INVALID);
1286 }
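	/*
	 * For example, mr_flags of (IBT_MR_ENABLE_LOCAL_WRITE |
	 * IBT_MR_ENABLE_REMOTE_WRITE) passes this check, while
	 * IBT_MR_ENABLE_REMOTE_WRITE alone is rejected; read-only
	 * remote access (IBT_MR_ENABLE_REMOTE_READ) needs no local
	 * write permission.
	 */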
1287
1288 /* Grab the Tavor softstate pointer and PD handle */
1289 state = (tavor_state_t *)hca;
1290 pdhdl = (tavor_pdhdl_t)pd;
1291
1292 /* Register the memory region */
1293 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1294 op.mro_bind_dmahdl = NULL;
1295 op.mro_bind_override_addr = 0;
1296 status = tavor_mr_register(state, pdhdl, mr_attr, &mrhdl, &op);
1297 if (status != DDI_SUCCESS) {
1298 return (status);
1299 }
1300 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1301
1302 /* Fill in the mr_desc structure */
1303 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1304 mr_desc->md_lkey = mrhdl->mr_lkey;
1305 /* Only set RKey if remote access was requested */
1306 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1307 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1308 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1309 mr_desc->md_rkey = mrhdl->mr_rkey;
1310 }
1311
1312 /*
1313 	 * If the region is mapped for streaming (i.e. noncoherent), mark
1314 	 * it as requiring explicit sync
1315 */
1316 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1317 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1318
1319 /* Return the Tavor MR handle */
1320 *mr_p = (ibc_mr_hdl_t)mrhdl;
1321
1322 return (IBT_SUCCESS);
1323 }
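
/*
 * Caller-side view of the registration above, as a hedged sketch using
 * the documented IBTF entry point ibt_register_mr(9F), which resolves
 * to this CI routine; the buffer, length, and handle names below are
 * illustrative, not taken from this file:
 *
 *	ibt_mr_attr_t	attr;
 *	ibt_mr_hdl_t	mrh;
 *	ibt_mr_desc_t	desc;
 *
 *	attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
 *	attr.mr_len   = len;
 *	attr.mr_as    = NULL;
 *	attr.mr_flags = IBT_MR_ENABLE_LOCAL_WRITE |
 *	    IBT_MR_ENABLE_REMOTE_WRITE;
 *	status = ibt_register_mr(hca_hdl, pd_hdl, &attr, &mrh, &desc);
 *
 * On success, desc.md_rkey is valid (remote access was requested) and
 * desc.md_sync_required tells the caller whether ibt_sync_mr() calls
 * are needed around DMA to the region.
 */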
1324
1325
1326 /*
1327 * tavor_ci_register_buf()
1328 * Prepare a Memory Region specified by buf structure for use by an HCA
1329 * Context: Can be called from interrupt or base context.
1330 */
1331 /* ARGSUSED */
1332 static ibt_status_t
1333 tavor_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1334 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1335 ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1336 {
1337 tavor_mr_options_t op;
1338 tavor_state_t *state;
1339 tavor_pdhdl_t pdhdl;
1340 tavor_mrhdl_t mrhdl;
1341 int status;
1342 ibt_mr_flags_t flags = attrp->mr_flags;
1343
1344 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1345
1346 ASSERT(mr_p != NULL);
1347 ASSERT(mr_desc != NULL);
1348
1349 /* Check for valid HCA handle */
1350 if (hca == NULL) {
1351 return (IBT_HCA_HDL_INVALID);
1352 }
1353
1354 /* Check for valid PD handle pointer */
1355 if (pd == NULL) {
1356 return (IBT_PD_HDL_INVALID);
1357 }
1358
1359 /*
1360 * Validate the access flags. Both Remote Write and Remote Atomic
1361 * require the Local Write flag to be set
1362 */
1363 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1364 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1365 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1366 return (IBT_MR_ACCESS_REQ_INVALID);
1367 }
1368
1369 /* Grab the Tavor softstate pointer and PD handle */
1370 state = (tavor_state_t *)hca;
1371 pdhdl = (tavor_pdhdl_t)pd;
1372
1373 /* Register the memory region */
1374 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1375 op.mro_bind_dmahdl = NULL;
1376 op.mro_bind_override_addr = 0;
1377 status = tavor_mr_register_buf(state, pdhdl, attrp, buf, &mrhdl, &op);
1378 if (status != DDI_SUCCESS) {
1379 return (status);
1380 }
1381 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1382
1383 /* Fill in the mr_desc structure */
1384 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1385 mr_desc->md_lkey = mrhdl->mr_lkey;
1386 /* Only set RKey if remote access was requested */
1387 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1388 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1389 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1390 mr_desc->md_rkey = mrhdl->mr_rkey;
1391 }
1392
1393 /*
1394 	 * If the region is mapped for streaming (i.e. noncoherent), mark
1395 	 * it as requiring explicit sync
1396 */
1397 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1398 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1399
1400 /* Return the Tavor MR handle */
1401 *mr_p = (ibc_mr_hdl_t)mrhdl;
1402
1403 return (IBT_SUCCESS);
1404 }
1405
1406
1407 /*
1408 * tavor_ci_deregister_mr()
1409 * Deregister a Memory Region from an HCA translation table
1410 * Context: Can be called only from user or kernel context.
1411 */
1412 static ibt_status_t
1413 tavor_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1414 {
1415 tavor_state_t *state;
1416 tavor_mrhdl_t mrhdl;
1417 int status;
1418
1419 /* Check for valid HCA handle */
1420 if (hca == NULL) {
1421 return (IBT_HCA_HDL_INVALID);
1422 }
1423
1424 /* Check for valid memory region handle */
1425 if (mr == NULL) {
1426 return (IBT_MR_HDL_INVALID);
1427 }
1428
1429 /* Grab the Tavor softstate pointer */
1430 state = (tavor_state_t *)hca;
1431 mrhdl = (tavor_mrhdl_t)mr;
1432
1433 /*
1434 * Deregister the memory region.
1435 */
1436 status = tavor_mr_deregister(state, &mrhdl, TAVOR_MR_DEREG_ALL,
1437 TAVOR_NOSLEEP);
1438 if (status != DDI_SUCCESS) {
1439 return (status);
1440 }
1441
1442 return (IBT_SUCCESS);
1443 }
1444
1445
1446 /*
1447 * tavor_ci_query_mr()
1448 * Retrieve information about a specified Memory Region
1449 * Context: Can be called from interrupt or base context.
1450 */
1451 static ibt_status_t
1452 tavor_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1453 ibt_mr_query_attr_t *mr_attr)
1454 {
1455 tavor_state_t *state;
1456 tavor_mrhdl_t mrhdl;
1457 int status;
1458
1459 ASSERT(mr_attr != NULL);
1460
1461 /* Check for valid HCA handle */
1462 if (hca == NULL) {
1463 return (IBT_HCA_HDL_INVALID);
1464 }
1465
1466 /* Check for MemRegion handle */
1467 if (mr == NULL) {
1468 return (IBT_MR_HDL_INVALID);
1469 }
1470
1471 /* Grab the Tavor softstate pointer and MR handle */
1472 state = (tavor_state_t *)hca;
1473 mrhdl = (tavor_mrhdl_t)mr;
1474
1475 /* Query the memory region */
1476 status = tavor_mr_query(state, mrhdl, mr_attr);
1477 if (status != DDI_SUCCESS) {
1478 return (status);
1479 }
1480
1481 return (IBT_SUCCESS);
1482 }
1483
1484
1485 /*
1486 * tavor_ci_register_shared_mr()
1487 * Create a shared memory region matching an existing Memory Region
1488 * Context: Can be called from interrupt or base context.
1489 */
1490 /* ARGSUSED */
1491 static ibt_status_t
1492 tavor_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1493 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1494 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1495 {
1496 tavor_state_t *state;
1497 tavor_pdhdl_t pdhdl;
1498 tavor_mrhdl_t mrhdl, mrhdl_new;
1499 int status;
1500
1501 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1502
1503 ASSERT(mr_attr != NULL);
1504 ASSERT(mr_p != NULL);
1505 ASSERT(mr_desc != NULL);
1506
1507 /* Check for valid HCA handle */
1508 if (hca == NULL) {
1509 return (IBT_HCA_HDL_INVALID);
1510 }
1511
1512 /* Check for valid PD handle pointer */
1513 if (pd == NULL) {
1514 return (IBT_PD_HDL_INVALID);
1515 }
1516
1517 /* Check for valid memory region handle */
1518 if (mr == NULL) {
1519 return (IBT_MR_HDL_INVALID);
1520 }
1521 /*
1522 * Validate the access flags. Both Remote Write and Remote Atomic
1523 * require the Local Write flag to be set
1524 */
1525 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1526 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1527 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1528 return (IBT_MR_ACCESS_REQ_INVALID);
1529 }
1530
1531 /* Grab the Tavor softstate pointer and handles */
1532 state = (tavor_state_t *)hca;
1533 pdhdl = (tavor_pdhdl_t)pd;
1534 mrhdl = (tavor_mrhdl_t)mr;
1535
1536 /* Register the shared memory region */
1537 status = tavor_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1538 &mrhdl_new);
1539 if (status != DDI_SUCCESS) {
1540 return (status);
1541 }
1542 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1543
1544 /* Fill in the mr_desc structure */
1545 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1546 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1547 /* Only set RKey if remote access was requested */
1548 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1549 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1550 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1551 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1552 }
1553
1554 /*
1555 	 * If the shared region is mapped for streaming (i.e. noncoherent),
1556 	 * mark it as requiring explicit sync
1557 */
1558 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1559 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1560
1561 /* Return the Tavor MR handle */
1562 *mr_p = (ibc_mr_hdl_t)mrhdl_new;
1563
1564 return (IBT_SUCCESS);
1565 }
1566
1567
1568 /*
1569 * tavor_ci_reregister_mr()
1570 * Modify the attributes of an existing Memory Region
1571 * Context: Can be called from interrupt or base context.
1572 */
1573 /* ARGSUSED */
1574 static ibt_status_t
1575 tavor_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1576 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1577 ibt_mr_desc_t *mr_desc)
1578 {
1579 tavor_mr_options_t op;
1580 tavor_state_t *state;
1581 tavor_pdhdl_t pdhdl;
1582 tavor_mrhdl_t mrhdl, mrhdl_new;
1583 int status;
1584
1585 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1586
1587 ASSERT(mr_attr != NULL);
1588 ASSERT(mr_new != NULL);
1589 ASSERT(mr_desc != NULL);
1590
1591 /* Check for valid HCA handle */
1592 if (hca == NULL) {
1593 return (IBT_HCA_HDL_INVALID);
1594 }
1595
1596 /* Check for valid memory region handle */
1597 if (mr == NULL) {
1598 return (IBT_MR_HDL_INVALID);
1599 }
1600
1601 /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1602 state = (tavor_state_t *)hca;
1603 mrhdl = (tavor_mrhdl_t)mr;
1604 pdhdl = (tavor_pdhdl_t)pd;
1605
1606 /* Reregister the memory region */
1607 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1608 status = tavor_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1609 &mrhdl_new, &op);
1610 if (status != DDI_SUCCESS) {
1611 return (status);
1612 }
1613 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1614
1615 /* Fill in the mr_desc structure */
1616 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1617 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1618 /* Only set RKey if remote access was requested */
1619 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1620 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1621 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1622 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1623 }
1624
1625 /*
1626 	 * If the region is mapped for streaming (i.e. noncoherent), mark
1627 	 * it as requiring explicit sync
1628 */
1629 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1630 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1631
1632 /* Return the Tavor MR handle */
1633 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1634
1635 return (IBT_SUCCESS);
1636 }
1637
1638
1639 /*
1640 * tavor_ci_reregister_buf()
1641 * Modify the attributes of an existing Memory Region
1642 * Context: Can be called from interrupt or base context.
1643 */
1644 /* ARGSUSED */
1645 static ibt_status_t
1646 tavor_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1647 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1648 ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1649 {
1650 tavor_mr_options_t op;
1651 tavor_state_t *state;
1652 tavor_pdhdl_t pdhdl;
1653 tavor_mrhdl_t mrhdl, mrhdl_new;
1654 int status;
1655 ibt_mr_flags_t flags = attrp->mr_flags;
1656
1657 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1658
1659 ASSERT(mr_new != NULL);
1660 ASSERT(mr_desc != NULL);
1661
1662 /* Check for valid HCA handle */
1663 if (hca == NULL) {
1664 return (IBT_HCA_HDL_INVALID);
1665 }
1666
1667 /* Check for valid memory region handle */
1668 if (mr == NULL) {
1669 return (IBT_MR_HDL_INVALID);
1670 }
1671
1672 /* Grab the Tavor softstate pointer, mrhdl, and pdhdl */
1673 state = (tavor_state_t *)hca;
1674 mrhdl = (tavor_mrhdl_t)mr;
1675 pdhdl = (tavor_pdhdl_t)pd;
1676
1677 /* Reregister the memory region */
1678 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1679 status = tavor_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1680 &mrhdl_new, &op);
1681 if (status != DDI_SUCCESS) {
1682 return (status);
1683 }
1684 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1685
1686 /* Fill in the mr_desc structure */
1687 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1688 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1689 /* Only set RKey if remote access was requested */
1690 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1691 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1692 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1693 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1694 }
1695
1696 /*
1697 	 * If the region is mapped for streaming (i.e. noncoherent), mark
1698 	 * it as requiring explicit sync
1699 */
1700 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1701 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1702
1703 /* Return the Tavor MR handle */
1704 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1705
1706 return (IBT_SUCCESS);
1707 }
1708
1709 /*
1710 * tavor_ci_sync_mr()
1711 * Synchronize access to a Memory Region
1712 * Context: Can be called from interrupt or base context.
1713 */
1714 static ibt_status_t
1715 tavor_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
1716 {
1717 tavor_state_t *state;
1718 int status;
1719
1720 ASSERT(mr_segs != NULL);
1721
1722 /* Check for valid HCA handle */
1723 if (hca == NULL) {
1724 return (IBT_HCA_HDL_INVALID);
1725 }
1726
1727 /* Grab the Tavor softstate pointer */
1728 state = (tavor_state_t *)hca;
1729
1730 /* Sync the memory region */
1731 status = tavor_mr_sync(state, mr_segs, num_segs);
1732 if (status != DDI_SUCCESS) {
1733 return (status);
1734 }
1735
1736 return (IBT_SUCCESS);
1737 }
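
/*
 * The mr_segs array consumed above is built by the IBTF client; a
 * minimal sketch against ibt_sync_mr(9F), where the handle and range
 * variables are illustrative and the flag semantics are paraphrased
 * from that page (IBT_SYNC_WRITE: make RDMA-written data visible to
 * the CPU; IBT_SYNC_READ: flush CPU writes before the device reads):
 *
 *	ibt_mr_sync_t	seg;
 *
 *	seg.ms_handle = mrh;
 *	seg.ms_vaddr  = vaddr;
 *	seg.ms_len    = len;
 *	seg.ms_flags  = IBT_SYNC_WRITE;
 *	status = ibt_sync_mr(hca_hdl, &seg, 1);
 */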
1738
1739
1740 /*
1741 * tavor_ci_alloc_mw()
1742 * Allocate a Memory Window
1743 * Context: Can be called from interrupt or base context.
1744 */
1745 static ibt_status_t
1746 tavor_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1747 ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1748 {
1749 tavor_state_t *state;
1750 tavor_pdhdl_t pdhdl;
1751 tavor_mwhdl_t mwhdl;
1752 int status;
1753
1754 ASSERT(mw_p != NULL);
1755 ASSERT(rkey_p != NULL);
1756
1757 /* Check for valid HCA handle */
1758 if (hca == NULL) {
1759 return (IBT_HCA_HDL_INVALID);
1760 }
1761
1762 /* Check for valid PD handle pointer */
1763 if (pd == NULL) {
1764 return (IBT_PD_HDL_INVALID);
1765 }
1766
1767 /* Grab the Tavor softstate pointer and PD handle */
1768 state = (tavor_state_t *)hca;
1769 pdhdl = (tavor_pdhdl_t)pd;
1770
1771 /* Allocate the memory window */
1772 status = tavor_mw_alloc(state, pdhdl, flags, &mwhdl);
1773 if (status != DDI_SUCCESS) {
1774 return (status);
1775 }
1776 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
1777
1778 /* Return the MW handle and RKey */
1779 *mw_p = (ibc_mw_hdl_t)mwhdl;
1780 *rkey_p = mwhdl->mr_rkey;
1781
1782 return (IBT_SUCCESS);
1783 }
1784
1785
1786 /*
1787 * tavor_ci_free_mw()
1788 * Free a Memory Window
1789 * Context: Can be called from interrupt or base context.
1790 */
1791 static ibt_status_t
1792 tavor_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1793 {
1794 tavor_state_t *state;
1795 tavor_mwhdl_t mwhdl;
1796 int status;
1797
1798 /* Check for valid HCA handle */
1799 if (hca == NULL) {
1800 return (IBT_HCA_HDL_INVALID);
1801 }
1802
1803 /* Check for valid MW handle */
1804 if (mw == NULL) {
1805 return (IBT_MW_HDL_INVALID);
1806 }
1807
1808 /* Grab the Tavor softstate pointer and MW handle */
1809 state = (tavor_state_t *)hca;
1810 mwhdl = (tavor_mwhdl_t)mw;
1811
1812 /* Free the memory window */
1813 status = tavor_mw_free(state, &mwhdl, TAVOR_NOSLEEP);
1814 if (status != DDI_SUCCESS) {
1815 return (status);
1816 }
1817
1818 return (IBT_SUCCESS);
1819 }
1820
1821
1822 /*
1823 * tavor_ci_query_mw()
1824 * Return the attributes of the specified Memory Window
1825 * Context: Can be called from interrupt or base context.
1826 */
1827 static ibt_status_t
1828 tavor_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
1829 ibt_mw_query_attr_t *mw_attr_p)
1830 {
1831 tavor_mwhdl_t mwhdl;
1832
1833 ASSERT(mw_attr_p != NULL);
1834
1835 /* Check for valid HCA handle */
1836 if (hca == NULL) {
1837 return (IBT_HCA_HDL_INVALID);
1838 }
1839
1840 /* Check for valid MemWin handle */
1841 if (mw == NULL) {
1842 return (IBT_MW_HDL_INVALID);
1843 }
1844
1845 /* Query the memory window pointer and fill in the return values */
1846 mwhdl = (tavor_mwhdl_t)mw;
1847 mutex_enter(&mwhdl->mr_lock);
1848 mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
1849 mw_attr_p->mw_rkey = mwhdl->mr_rkey;
1850 mutex_exit(&mwhdl->mr_lock);
1851
1852 return (IBT_SUCCESS);
1853 }
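
/*
 * Illustration: the three entry points above cover a memory window's
 * whole lifecycle.  A hedged sketch (error handling omitted; "pd" is an
 * already-allocated protection domain and "mwflags" carries the caller's
 * sleep/no-sleep policy):
 *
 *	ibc_mw_hdl_t mw;
 *	ibt_rkey_t rkey;
 *	ibt_mw_query_attr_t attr;
 *
 *	(void) tavor_ci_alloc_mw(hca, pd, mwflags, &mw, &rkey);
 *	(void) tavor_ci_query_mw(hca, mw, &attr);  // attr.mw_rkey == rkey
 *	(void) tavor_ci_free_mw(hca, mw);
 */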
1854
1855
1856 /* ARGSUSED */
1857 static ibt_status_t
1858 tavor_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1859 ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1860 ibt_mr_desc_t *mr_desc)
1861 {
1862 tavor_state_t *state;
1863 tavor_pdhdl_t pdhdl;
1864 tavor_mrhdl_t mrhdl;
1865 int status;
1866
1867 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1868
1869 ASSERT(mr_attr != NULL);
1870 ASSERT(mr_p != NULL);
1871 ASSERT(mr_desc != NULL);
1872
1873 /* Check for valid HCA handle */
1874 if (hca == NULL) {
1875 return (IBT_HCA_HDL_INVALID);
1876 }
1877
1878 /* Check for valid PD handle pointer */
1879 if (pd == NULL) {
1880 return (IBT_PD_HDL_INVALID);
1881 }
1882
1883 /*
1884 * Validate the access flags. Both Remote Write and Remote Atomic
1885 * require the Local Write flag to be set
1886 */
1887 if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1888 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1889 !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1890 return (IBT_MR_ACCESS_REQ_INVALID);
1891 }
1892
1893 /* Grab the Tavor softstate pointer and PD handle */
1894 state = (tavor_state_t *)hca;
1895 pdhdl = (tavor_pdhdl_t)pd;
1896
1897 status = tavor_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
1898 if (status != DDI_SUCCESS) {
1899 return (status);
1900 }
1901 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1902
1903 /* Fill in the mr_desc structure */
1904 mr_desc->md_vaddr = mr_attr->dmr_paddr;
1905 mr_desc->md_lkey = mrhdl->mr_lkey;
1906 /* Only set RKey if remote access was requested */
1907 if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1908 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1909 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1910 mr_desc->md_rkey = mrhdl->mr_rkey;
1911 }
1912
1913 /*
1914 * Unlike the regular memory registration path, DMA memory regions
1915 * never report that a sync is required
1916 */
1917 mr_desc->md_sync_required = B_FALSE;
1918
1919 /* Return the Tavor MR handle */
1920 *mr_p = (ibc_mr_hdl_t)mrhdl;
1921
1922 return (IBT_SUCCESS);
1923 }
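
/*
 * Illustration: the dmr_* attributes consumed above describe a region by
 * physical address rather than by virtual mapping.  A hedged sketch
 * (only dmr_paddr and dmr_flags are referenced in this file; the dmr_len
 * field name is assumed from the IBTF headers):
 *
 *	ibt_dmr_attr_t dmr;
 *	ibc_mr_hdl_t mr;
 *	ibt_mr_desc_t desc;
 *
 *	dmr.dmr_paddr = paddr;		// physical base of the region
 *	dmr.dmr_len = len;		// size in bytes (assumed field)
 *	dmr.dmr_flags = IBT_MR_ENABLE_LOCAL_WRITE |
 *	    IBT_MR_ENABLE_REMOTE_WRITE;	// remote write needs local write
 *	(void) tavor_ci_register_dma_mr(hca, pd, &dmr, NULL, &mr, &desc);
 */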
1924
1925
1926 /*
1927 * tavor_ci_attach_mcg()
1928 * Attach a Queue Pair to a Multicast Group
1929 * Context: Can be called only from user or kernel context.
1930 */
1931 static ibt_status_t
1932 tavor_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1933 ib_lid_t lid)
1934 {
1935 tavor_state_t *state;
1936 tavor_qphdl_t qphdl;
1937 int status;
1938
1939 /* Check for valid HCA handle */
1940 if (hca == NULL) {
1941 return (IBT_HCA_HDL_INVALID);
1942 }
1943
1944 /* Check for valid QP handle pointer */
1945 if (qp == NULL) {
1946 return (IBT_QP_HDL_INVALID);
1947 }
1948
1949 /* Grab the Tavor softstate pointer and QP handles */
1950 state = (tavor_state_t *)hca;
1951 qphdl = (tavor_qphdl_t)qp;
1952
1953 /* Attach the QP to the multicast group */
1954 status = tavor_mcg_attach(state, qphdl, gid, lid);
1955 if (status != DDI_SUCCESS) {
1956 return (status);
1957 }
1958
1959 return (IBT_SUCCESS);
1960 }
1961
1962
1963 /*
1964 * tavor_ci_detach_mcg()
1965 * Detach a Queue Pair from a Multicast Group
1966 * Context: Can be called only from user or kernel context.
1967 */
1968 static ibt_status_t
1969 tavor_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1970 ib_lid_t lid)
1971 {
1972 tavor_state_t *state;
1973 tavor_qphdl_t qphdl;
1974 int status;
1975
1976 /* Check for valid HCA handle */
1977 if (hca == NULL) {
1978 return (IBT_HCA_HDL_INVALID);
1979 }
1980
1981 /* Check for valid QP handle pointer */
1982 if (qp == NULL) {
1983 return (IBT_QP_HDL_INVALID);
1984 }
1985
1986 /* Grab the Tavor softstate pointer and QP handle */
1987 state = (tavor_state_t *)hca;
1988 qphdl = (tavor_qphdl_t)qp;
1989
1990 /* Detach the QP from the multicast group */
1991 status = tavor_mcg_detach(state, qphdl, gid, lid);
1992 if (status != DDI_SUCCESS) {
1993 return (status);
1994 }
1995
1996 return (IBT_SUCCESS);
1997 }
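
/*
 * Illustration: attach and detach are symmetric, so a UD QP joined to a
 * multicast group must eventually be removed with the same GID/LID pair.
 * A hedged sketch ("mcg_gid" and "mcg_lid" are hypothetical values
 * obtained from multicast group management):
 *
 *	(void) tavor_ci_attach_mcg(hca, qp, mcg_gid, mcg_lid);
 *	// ... receive multicast traffic on the QP ...
 *	(void) tavor_ci_detach_mcg(hca, qp, mcg_gid, mcg_lid);
 */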
1998
1999
2000 /*
2001 * tavor_ci_post_send()
2002 * Post send work requests to the send queue on the specified QP
2003 * Context: Can be called from interrupt or base context.
2004 */
2005 static ibt_status_t
2006 tavor_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
2007 uint_t num_wr, uint_t *num_posted_p)
2008 {
2009 tavor_state_t *state;
2010 tavor_qphdl_t qphdl;
2011 int status;
2012
2013 ASSERT(wr_p != NULL);
2014 ASSERT(num_wr != 0);
2015
2016 /* Check for valid HCA handle */
2017 if (hca == NULL) {
2018 return (IBT_HCA_HDL_INVALID);
2019 }
2020
2021 /* Check for valid QP handle pointer */
2022 if (qp == NULL) {
2023 return (IBT_QP_HDL_INVALID);
2024 }
2025
2026 /* Grab the Tavor softstate pointer and QP handle */
2027 state = (tavor_state_t *)hca;
2028 qphdl = (tavor_qphdl_t)qp;
2029
2030 /* Post the send WQEs */
2031 status = tavor_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
2032 if (status != DDI_SUCCESS) {
2033 return (status);
2034 }
2035
2036 return (IBT_SUCCESS);
2037 }
2038
2039
2040 /*
2041 * tavor_ci_post_recv()
2042 * Post receive work requests to the receive queue on the specified QP
2043 * Context: Can be called from interrupt or base context.
2044 */
2045 static ibt_status_t
2046 tavor_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
2047 uint_t num_wr, uint_t *num_posted_p)
2048 {
2049 tavor_state_t *state;
2050 tavor_qphdl_t qphdl;
2051 int status;
2052
2053 ASSERT(wr_p != NULL);
2054 ASSERT(num_wr != 0);
2055
2056 /* Check for valid HCA handle */
2057 if (hca == NULL) {
2058 return (IBT_HCA_HDL_INVALID);
2059 }
2060
2061 /* Check for valid QP handle pointer */
2062 if (qp == NULL) {
2063 return (IBT_QP_HDL_INVALID);
2064 }
2065
2066 /* Grab the Tavor softstate pointer and QP handle */
2067 state = (tavor_state_t *)hca;
2068 qphdl = (tavor_qphdl_t)qp;
2069
2070 /* Post the receive WQEs */
2071 status = tavor_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
2072 if (status != DDI_SUCCESS) {
2073 return (status);
2074 }
2075
2076 return (IBT_SUCCESS);
2077 }
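
/*
 * Illustration: for both post entry points above, *num_posted_p tells the
 * caller how many of the num_wr requests actually made it onto the work
 * queue when a mid-list failure occurs.  A hedged sketch of handling a
 * partial post ("wrs" and "nwr" are hypothetical):
 *
 *	uint_t posted;
 *	ibt_status_t s;
 *
 *	s = tavor_ci_post_send(hca, qp, wrs, nwr, &posted);
 *	if (s != IBT_SUCCESS && posted < nwr) {
 *		// retry (or fail) the unposted tail: wrs[posted..nwr-1]
 *	}
 */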
2078
2079
2080 /*
2081 * tavor_ci_poll_cq()
2082 * Poll for a work request completion
2083 * Context: Can be called from interrupt or base context.
2084 */
2085 static ibt_status_t
2086 tavor_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
2087 uint_t num_wc, uint_t *num_polled)
2088 {
2089 tavor_state_t *state;
2090 tavor_cqhdl_t cqhdl;
2091 uint_t polled;
2092 int status;
2093
2094 ASSERT(wc_p != NULL);
2095
2096 /* Check for valid HCA handle */
2097 if (hca == NULL) {
2098 return (IBT_HCA_HDL_INVALID);
2099 }
2100
2101 /* Check for valid CQ handle pointer */
2102 if (cq == NULL) {
2103 return (IBT_CQ_HDL_INVALID);
2104 }
2105
2106 /* Check for valid num_wc field */
2107 if (num_wc == 0) {
2108 return (IBT_INVALID_PARAM);
2109 }
2110
2111 /* Grab the Tavor softstate pointer and CQ handle */
2112 state = (tavor_state_t *)hca;
2113 cqhdl = (tavor_cqhdl_t)cq;
2114
2115 /* Poll for work request completions */
2116 status = tavor_cq_poll(state, cqhdl, wc_p, num_wc, &polled);
2117
2118 /* First fill in "num_polled" argument (only when valid) */
2119 if (num_polled) {
2120 *num_polled = polled;
2121 }
2122
2123 /*
2124 * Check the status code:
2125 * If the CQ was empty, we pass the "empty" status back unchanged.
2126 * If an error occurred, we return the error status.
2127 * If success (something was polled), we return success.
2128 */
2129 if (status != DDI_SUCCESS) {
2130 return (status);
2131 }
2132
2133 return (IBT_SUCCESS);
2134 }
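
/*
 * Illustration: a consumer typically drains the CQ by calling this entry
 * point until the queue is empty.  A hedged sketch (assuming the usual
 * IBT_CQ_EMPTY status for an empty queue):
 *
 *	ibt_wc_t wc[8];
 *	uint_t n;
 *	ibt_status_t s;
 *
 *	do {
 *		s = tavor_ci_poll_cq(hca, cq, wc, 8, &n);
 *		// ... process the n completions in wc[] ...
 *	} while (s == IBT_SUCCESS);
 *	// loop exits with IBT_CQ_EMPTY (or an error status)
 */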
2135
2136
2137 /*
2138 * tavor_ci_notify_cq()
2139 * Enable notification events on the specified CQ
2140 * Context: Can be called from interrupt or base context.
2141 */
2142 static ibt_status_t
2143 tavor_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
2144 ibt_cq_notify_flags_t flags)
2145 {
2146 tavor_state_t *state;
2147 tavor_cqhdl_t cqhdl;
2148 int status;
2149
2150 /* Check for valid HCA handle */
2151 if (hca == NULL) {
2152 return (IBT_HCA_HDL_INVALID);
2153 }
2154
2155 /* Check for valid CQ handle pointer */
2156 if (cq_hdl == NULL) {
2157 return (IBT_CQ_HDL_INVALID);
2158 }
2159
2160 /* Grab the Tavor softstate pointer and CQ handle */
2161 state = (tavor_state_t *)hca;
2162 cqhdl = (tavor_cqhdl_t)cq_hdl;
2163
2164 /* Enable the CQ notification */
2165 status = tavor_cq_notify(state, cqhdl, flags);
2166 if (status != DDI_SUCCESS) {
2167 return (status);
2168 }
2169
2170 return (IBT_SUCCESS);
2171 }
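
/*
 * Illustration: to avoid losing a completion that arrives between the
 * last poll and re-arming, consumers conventionally poll, arm, then poll
 * once more before sleeping.  A hedged sketch:
 *
 *	ibt_wc_t wc;
 *	uint_t n;
 *
 *	while (tavor_ci_poll_cq(hca, cq, &wc, 1, &n) == IBT_SUCCESS)
 *		;	// drain everything currently in the CQ
 *	(void) tavor_ci_notify_cq(hca, cq, IBT_NEXT_COMPLETION);
 *	if (tavor_ci_poll_cq(hca, cq, &wc, 1, &n) == IBT_SUCCESS) {
 *		// a completion raced with the arming; handle it now
 *	}
 */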
2172
2173 /*
2174 * tavor_ci_ci_data_in()
2175 * Exchange CI-specific data.
2176 * Context: Can be called only from user or kernel context.
2177 */
2178 static ibt_status_t
2179 tavor_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2180 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2181 size_t data_sz)
2182 {
2183 tavor_state_t *state;
2184 int status;
2185
2186 /* Check for valid HCA handle */
2187 if (hca == NULL) {
2188 return (IBT_HCA_HDL_INVALID);
2189 }
2190
2191 /* Grab the Tavor softstate pointer */
2192 state = (tavor_state_t *)hca;
2193
2194 /* Get the Tavor userland mapping information */
2195 status = tavor_umap_ci_data_in(state, flags, object,
2196 ibc_object_handle, data_p, data_sz);
2197 if (status != DDI_SUCCESS) {
2198 return (status);
2199 }
2200
2201 return (IBT_SUCCESS);
2202 }
2203
2204 /*
2205 * tavor_ci_ci_data_out()
2206 * Exchange CI-specific data.
2207 * Context: Can be called only from user or kernel context.
2208 */
2209 static ibt_status_t
2210 tavor_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
2211 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
2212 size_t data_sz)
2213 {
2214 tavor_state_t *state;
2215 int status;
2216
2217 /* Check for valid HCA handle */
2218 if (hca == NULL) {
2219 return (IBT_HCA_HDL_INVALID);
2220 }
2221
2222 /* Grab the Tavor softstate pointer */
2223 state = (tavor_state_t *)hca;
2224
2225 /* Get the Tavor userland mapping information */
2226 status = tavor_umap_ci_data_out(state, flags, object,
2227 ibc_object_handle, data_p, data_sz);
2228 if (status != DDI_SUCCESS) {
2229 return (status);
2230 }
2231
2232 return (IBT_SUCCESS);
2233 }
2234
2235
2236 /*
2237 * tavor_ci_alloc_srq()
2238 * Allocate a Shared Receive Queue (SRQ)
2239 * Context: Can be called only from user or kernel context
2240 */
2241 static ibt_status_t
2242 tavor_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
2243 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
2244 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
2245 {
2246 tavor_state_t *state;
2247 tavor_pdhdl_t pdhdl;
2248 tavor_srqhdl_t srqhdl;
2249 tavor_srq_info_t srqinfo;
2250 tavor_srq_options_t op;
2251 int status;
2252
2253 /* Check for valid HCA handle */
2254 if (hca == NULL) {
2255 return (IBT_HCA_HDL_INVALID);
2256 }
2257
2258 state = (tavor_state_t *)hca;
2259
2260 /* Check if SRQ is even supported */
2261 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2262 return (IBT_NOT_SUPPORTED);
2263 }
2264
2265 /* Check for valid PD handle pointer */
2266 if (pd == NULL) {
2267 return (IBT_PD_HDL_INVALID);
2268 }
2269
2270 pdhdl = (tavor_pdhdl_t)pd;
2271
2272 srqinfo.srqi_ibt_srqhdl = ibt_srq;
2273 srqinfo.srqi_pd = pdhdl;
2274 srqinfo.srqi_sizes = sizes;
2275 srqinfo.srqi_real_sizes = ret_sizes_p;
2276 srqinfo.srqi_srqhdl = &srqhdl;
2277 srqinfo.srqi_flags = flags;
2278 op.srqo_wq_loc = state->ts_cfg_profile->cp_srq_wq_inddr;
2279 status = tavor_srq_alloc(state, &srqinfo, TAVOR_NOSLEEP, &op);
2280 if (status != DDI_SUCCESS) {
2281 return (status);
2282 }
2283
2284 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
2285
2286 return (IBT_SUCCESS);
2287 }
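
/*
 * Illustration: the caller passes requested sizes in and receives the
 * (possibly rounded-up) granted sizes back through ret_sizes_p.  A hedged
 * sketch ("ibt_srq" is the IBTF-level handle supplied by the framework;
 * IBT_SRQ_NO_FLAGS is assumed from the IBTF headers):
 *
 *	ibt_srq_sizes_t req, real;
 *	ibc_srq_hdl_t srq;
 *
 *	req.srq_wr_sz = 256;	// requested work-request depth
 *	req.srq_sgl_sz = 4;	// requested SGL entries per WR
 *	(void) tavor_ci_alloc_srq(hca, IBT_SRQ_NO_FLAGS, ibt_srq, pd,
 *	    &req, &srq, &real);
 *	// real.srq_wr_sz and real.srq_sgl_sz hold what was granted
 */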
2288
2289 /*
2290 * tavor_ci_free_srq()
2291 * Free a Shared Receive Queue (SRQ)
2292 * Context: Can be called only from user or kernel context
2293 */
2294 static ibt_status_t
2295 tavor_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
2296 {
2297 tavor_state_t *state;
2298 tavor_srqhdl_t srqhdl;
2299 int status;
2300
2301 /* Check for valid HCA handle */
2302 if (hca == NULL) {
2303 return (IBT_HCA_HDL_INVALID);
2304 }
2305
2306 state = (tavor_state_t *)hca;
2307
2308 /* Check if SRQ is even supported */
2309 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2310 return (IBT_NOT_SUPPORTED);
2311 }
2312
2313 /* Check for valid SRQ handle pointer */
2314 if (srq == NULL) {
2315 return (IBT_SRQ_HDL_INVALID);
2316 }
2317
2318 srqhdl = (tavor_srqhdl_t)srq;
2319
2320 /* Free the SRQ */
2321 status = tavor_srq_free(state, &srqhdl, TAVOR_NOSLEEP);
2322 if (status != DDI_SUCCESS) {
2323 return (status);
2324 }
2325
2326 return (IBT_SUCCESS);
2327 }
2328
2329 /*
2330 * tavor_ci_query_srq()
2331 * Query properties of a Shared Receive Queue (SRQ)
2332 * Context: Can be called from interrupt or base context.
2333 */
2334 static ibt_status_t
2335 tavor_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
2336 ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
2337 {
2338 tavor_state_t *state;
2339 tavor_srqhdl_t srqhdl;
2340
2341 /* Check for valid HCA handle */
2342 if (hca == NULL) {
2343 return (IBT_HCA_HDL_INVALID);
2344 }
2345
2346 state = (tavor_state_t *)hca;
2347
2348 /* Check if SRQ is even supported */
2349 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2350 return (IBT_NOT_SUPPORTED);
2351 }
2352
2353 /* Check for valid SRQ handle pointer */
2354 if (srq == NULL) {
2355 return (IBT_SRQ_HDL_INVALID);
2356 }
2357
2358 srqhdl = (tavor_srqhdl_t)srq;
2359
2360 mutex_enter(&srqhdl->srq_lock);
2361 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2362 mutex_exit(&srqhdl->srq_lock);
2363 return (IBT_SRQ_ERROR_STATE);
2364 }
2365
2366 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
2367 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz;
2368 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
2369 mutex_exit(&srqhdl->srq_lock);
2370 *limit_p = 0;
2371
2372 return (IBT_SUCCESS);
2373 }
2374
2375 /*
2376 * tavor_ci_modify_srq()
2377 * Modify properties of a Shared Receive Queue (SRQ)
2378 * Context: Can be called from interrupt or base context.
2379 */
2380 /* ARGSUSED */
2381 static ibt_status_t
2382 tavor_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2383 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
2384 {
2385 tavor_state_t *state;
2386 tavor_srqhdl_t srqhdl;
2387 uint_t resize_supported, cur_srq_size;
2388 int status;
2389
2390 /* Check for valid HCA handle */
2391 if (hca == NULL) {
2392 return (IBT_HCA_HDL_INVALID);
2393 }
2394
2395 state = (tavor_state_t *)hca;
2396
2397 /* Check if SRQ is even supported */
2398 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2399 return (IBT_NOT_SUPPORTED);
2400 }
2401
2402 /* Check for valid SRQ handle pointer */
2403 if (srq == NULL) {
2404 return (IBT_SRQ_HDL_INVALID);
2405 }
2406
2407 srqhdl = (tavor_srqhdl_t)srq;
2408
2409 /*
2410 * Check Error State of SRQ.
2411 * Also, while we are holding the lock we save away the current SRQ
2412 * size for later use.
2413 */
2414 mutex_enter(&srqhdl->srq_lock);
2415 cur_srq_size = srqhdl->srq_wq_bufsz;
2416 if (srqhdl->srq_state == TAVOR_SRQ_STATE_ERROR) {
2417 mutex_exit(&srqhdl->srq_lock);
2418 return (IBT_SRQ_ERROR_STATE);
2419 }
2420 mutex_exit(&srqhdl->srq_lock);
2421
2422 /*
2423 * Setting the limit watermark is not currently supported. This is a
2424 * tavor hardware (firmware) limitation. We return NOT_SUPPORTED here,
2425 * and have the limit code commented out for now.
2426 *
2427 * XXX If we enable the limit watermark support, we need to do checks
2428 * and set the 'srq->srq_wr_limit' here, instead of returning not
2429 * supported. The 'tavor_srq_modify' operation below is for resizing
2430 * the SRQ only, the limit work should be done here. If this is
2431 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
2432 * function should also be removed at that time.
2433 */
2434 if (flags & IBT_SRQ_SET_LIMIT) {
2435 return (IBT_NOT_SUPPORTED);
2436 }
2437
2438 /*
2439 * Check the SET_SIZE flag. If not set, we simply return success here.
2440 * However if it is set, we check if resize is supported and only then
2441 * do we continue on with our resize processing.
2442 */
2443 if (!(flags & IBT_SRQ_SET_SIZE)) {
2444 return (IBT_SUCCESS);
2445 }
2446
2447 resize_supported = state->ts_ibtfinfo.hca_attr->hca_flags &
2448 IBT_HCA_RESIZE_SRQ;
2449
2450 if (!resize_supported) {
2451 return (IBT_NOT_SUPPORTED);
2452 }
2453
2454 /*
2455 * We do not support resizing an SRQ to be smaller than its current
2456 * size. If a smaller (or equal) size is requested, then we simply
2457 * return success, and do nothing.
2458 */
2459 if (size <= cur_srq_size) {
2460 *ret_size_p = cur_srq_size;
2461 return (IBT_SUCCESS);
2462 }
2463
2464 status = tavor_srq_modify(state, srqhdl, size, ret_size_p,
2465 TAVOR_NOSLEEP);
2466 if (status != DDI_SUCCESS) {
2467 /* Set return value to current SRQ size */
2468 *ret_size_p = cur_srq_size;
2469 return (status);
2470 }
2471
2472 return (IBT_SUCCESS);
2473 }
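
/*
 * Illustration: given the rules above, a resize request can only grow the
 * SRQ, and *ret_size_p always reflects the size in effect when the call
 * returns.  A hedged sketch:
 *
 *	uint_t real_size;
 *	ibt_status_t s;
 *
 *	s = tavor_ci_modify_srq(hca, srq, IBT_SRQ_SET_SIZE,
 *	    new_size, 0, &real_size);
 *	// on success real_size >= new_size; on failure it holds the
 *	// unchanged current size
 */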
2474
2475 /*
2476 * tavor_ci_post_srq()
2477 * Post a Work Request to the specified Shared Receive Queue (SRQ)
2478 * Context: Can be called from interrupt or base context.
2479 */
2480 static ibt_status_t
2481 tavor_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2482 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2483 {
2484 tavor_state_t *state;
2485 tavor_srqhdl_t srqhdl;
2486 int status;
2487
2488 /* Check for valid HCA handle */
2489 if (hca == NULL) {
2490 return (IBT_HCA_HDL_INVALID);
2491 }
2492
2493 state = (tavor_state_t *)hca;
2494
2495 /* Check if SRQ is even supported */
2496 if (state->ts_cfg_profile->cp_srq_enable == 0) {
2497 return (IBT_NOT_SUPPORTED);
2498 }
2499
2500 /* Check for valid SRQ handle pointer */
2501 if (srq == NULL) {
2502 return (IBT_SRQ_HDL_INVALID);
2503 }
2504
2505 srqhdl = (tavor_srqhdl_t)srq;
2506
2507 status = tavor_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2508 if (status != DDI_SUCCESS) {
2509 return (status);
2510 }
2511
2512 return (IBT_SUCCESS);
2513 }
2514
2515 /* Address translation */
2516 /*
2517 * tavor_ci_map_mem_area()
2518 * Context: Can be called from interrupt or base context.
2519 */
2520 /* ARGSUSED */
2521 static ibt_status_t
2522 tavor_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2523 void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2524 ibc_ma_hdl_t *ibc_ma_hdl_p)
2525 {
2526 return (IBT_NOT_SUPPORTED);
2527 }
2528
2529 /*
2530 * tavor_ci_unmap_mem_area()
2531 * Unmap the memory area
2532 * Context: Can be called from interrupt or base context.
2533 */
2534 /* ARGSUSED */
2535 static ibt_status_t
2536 tavor_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2537 {
2538 return (IBT_NOT_SUPPORTED);
2539 }
2540
2541 struct ibc_mi_s {
2542 int imh_len;
2543 ddi_dma_handle_t imh_dmahandle[1];
2544 };
2545 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2546 ibc_mi_s::imh_len
2547 ibc_mi_s::imh_dmahandle))
2548
2549
2550 /*
2551 * tavor_ci_map_mem_iov()
2552 * Map the memory
2553 * Context: Can be called from interrupt or base context.
2554 */
2555 /* ARGSUSED */
2556 static ibt_status_t
2557 tavor_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2558 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2559 {
2560 int status;
2561 int i, j, nds, max_nds;
2562 uint_t len;
2563 ibt_status_t ibt_status;
2564 ddi_dma_handle_t dmahdl;
2565 ddi_dma_cookie_t dmacookie;
2566 ddi_dma_attr_t dma_attr;
2567 uint_t cookie_cnt;
2568 ibc_mi_hdl_t mi_hdl;
2569 ibt_lkey_t rsvd_lkey;
2570 ibt_wr_ds_t *sgl;
2571 tavor_state_t *state;
2572 int kmflag;
2573 int (*callback)(caddr_t);
2574
2575 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2576
2577 if (mi_hdl_p == NULL)
2578 return (IBT_MI_HDL_INVALID);
2579
2580 /* Check for valid HCA handle */
2581 if (hca == NULL)
2582 return (IBT_HCA_HDL_INVALID);
2583
2584 /* Tavor does not allow the default "use reserved lkey" */
2585 if ((iov_attr->iov_flags & IBT_IOV_ALT_LKEY) == 0)
2586 return (IBT_INVALID_PARAM);
2587
2588 rsvd_lkey = iov_attr->iov_alt_lkey;
2589
2590 state = (tavor_state_t *)hca;
2591 tavor_dma_attr_init(&dma_attr);
2592 #ifdef __sparc
2593 if (state->ts_cfg_profile->cp_iommu_bypass == TAVOR_BINDMEM_BYPASS)
2594 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2595 #endif
2596
2597 nds = 0;
2598 max_nds = iov_attr->iov_wr_nds;
2599 if (iov_attr->iov_lso_hdr_sz)
2600 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2601 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */
2602 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2603 kmflag = KM_SLEEP;
2604 callback = DDI_DMA_SLEEP;
2605 } else {
2606 kmflag = KM_NOSLEEP;
2607 callback = DDI_DMA_DONTWAIT;
2608 }
2609
2610 if (iov_attr->iov_flags & IBT_IOV_BUF) {
2611 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2612 if (mi_hdl == NULL)
2613 return (IBT_INSUFF_RESOURCE);
2614 sgl = wr->send.wr_sgl;
2615 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2616
2617 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
2618 callback, NULL, &dmahdl);
2619 if (status != DDI_SUCCESS) {
2620 kmem_free(mi_hdl, sizeof (*mi_hdl));
2621 return (IBT_INSUFF_RESOURCE);
2622 }
2623 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2624 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2625 &dmacookie, &cookie_cnt);
2626 if (status != DDI_DMA_MAPPED) {
2627 ddi_dma_free_handle(&dmahdl);
2628 kmem_free(mi_hdl, sizeof (*mi_hdl));
2629 return (ibc_get_ci_failure(0));
2630 }
2631 while (cookie_cnt-- > 0) {
2632 if (nds >= max_nds) {
2633 (void) ddi_dma_unbind_handle(dmahdl);
2634 ddi_dma_free_handle(&dmahdl);
kmem_free(mi_hdl, sizeof (*mi_hdl));
2635 return (IBT_SGL_TOO_SMALL);
2636 }
2637 sgl[nds].ds_va = dmacookie.dmac_laddress;
2638 sgl[nds].ds_key = rsvd_lkey;
2639 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2640 nds++;
2641 if (cookie_cnt != 0)
2642 ddi_dma_nextcookie(dmahdl, &dmacookie);
2643 }
2644 wr->send.wr_nds = nds;
2645 mi_hdl->imh_len = 1;
2646 mi_hdl->imh_dmahandle[0] = dmahdl;
2647 *mi_hdl_p = mi_hdl;
2648 return (IBT_SUCCESS);
2649 }
2650
2651 if (iov_attr->iov_flags & IBT_IOV_RECV)
2652 sgl = wr->recv.wr_sgl;
2653 else
2654 sgl = wr->send.wr_sgl;
2655 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2656
2657 len = iov_attr->iov_list_len;
2658 for (i = 0, j = 0; j < len; j++) {
2659 if (iov_attr->iov[j].iov_len == 0)
2660 continue;
2661 i++;
2662 }
2663 mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2664 (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2665 if (mi_hdl == NULL)
2666 return (IBT_INSUFF_RESOURCE);
2667 mi_hdl->imh_len = i;
2668 for (i = 0, j = 0; j < len; j++) {
2669 if (iov_attr->iov[j].iov_len == 0)
2670 continue;
2671 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr,
2672 callback, NULL, &dmahdl);
2673 if (status != DDI_SUCCESS) {
2674 ibt_status = IBT_INSUFF_RESOURCE;
2675 goto fail2;
2676 }
2677 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2678 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2679 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2680 &dmacookie, &cookie_cnt);
2681 if (status != DDI_DMA_MAPPED) {
2682 ibt_status = ibc_get_ci_failure(0);
2683 goto fail1;
2684 }
2685 if (nds + cookie_cnt > max_nds) {
2686 ibt_status = IBT_SGL_TOO_SMALL;
(void) ddi_dma_unbind_handle(dmahdl);
2687 goto fail1;
2688 }
2689 while (cookie_cnt-- > 0) {
2690 sgl[nds].ds_va = dmacookie.dmac_laddress;
2691 sgl[nds].ds_key = rsvd_lkey;
2692 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2693 nds++;
2694 if (cookie_cnt != 0)
2695 ddi_dma_nextcookie(dmahdl, &dmacookie);
2696 }
2697 mi_hdl->imh_dmahandle[i] = dmahdl;
2698 i++;
2699 }
2700
2701 if (iov_attr->iov_flags & IBT_IOV_RECV)
2702 wr->recv.wr_nds = nds;
2703 else
2704 wr->send.wr_nds = nds;
2705 *mi_hdl_p = mi_hdl;
2706 return (IBT_SUCCESS);
2707
2708 fail1:
2709 ddi_dma_free_handle(&dmahdl);
2710 fail2:
2711 while (--i >= 0) {
2712 (void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2713 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2714 }
2715 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2716 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2717 *mi_hdl_p = NULL;
2718 return (ibt_status);
2719 }
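
/*
 * Illustration: because this entry point rejects the default reserved
 * lkey, a Tavor consumer must set IBT_IOV_ALT_LKEY and supply the lkey
 * of a registered region covering the buffers.  A hedged sketch for the
 * buf(9S) variant ("bp", "qp_sq_sgl_max", "mr_lkey", and "all_wr" are
 * hypothetical caller-side values):
 *
 *	ibt_iov_attr_t ia;
 *	ibc_mi_hdl_t mi;
 *
 *	bzero(&ia, sizeof (ia));
 *	ia.iov_flags = IBT_IOV_BUF | IBT_IOV_ALT_LKEY;
 *	ia.iov_buf = bp;		// struct buf being mapped
 *	ia.iov_wr_nds = qp_sq_sgl_max;	// SGL slots available in the WR
 *	ia.iov_alt_lkey = mr_lkey;	// lkey from a prior registration
 *	(void) tavor_ci_map_mem_iov(hca, &ia, &all_wr, &mi);
 *	// ... post all_wr.send, then tavor_ci_unmap_mem_iov(hca, mi) ...
 */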
2720
2721 /*
2722 * tavor_ci_unmap_mem_iov()
2723 * Unmap the memory
2724 * Context: Can be called from interrupt or base context.
2725 */
2726 /* ARGSUSED */
2727 static ibt_status_t
2728 tavor_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2729 {
2730 int i;
2731
2732 /* Check for valid HCA handle */
2733 if (hca == NULL)
2734 return (IBT_HCA_HDL_INVALID);
2735
2736 if (mi_hdl == NULL)
2737 return (IBT_MI_HDL_INVALID);
2738
2739 for (i = 0; i < mi_hdl->imh_len; i++) {
2740 (void) ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2741 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2742 }
2743 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2744 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2745 return (IBT_SUCCESS);
2746 }
2747
2748 /* Allocate L_Key */
2749 /*
2750 * tavor_ci_alloc_lkey()
2751 */
2752 /* ARGSUSED */
2753 static ibt_status_t
2754 tavor_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2755 ibt_lkey_flags_t flags, uint_t phys_buf_list_sz, ibc_mr_hdl_t *mr_p,
2756 ibt_pmr_desc_t *mem_desc_p)
2757 {
2758 return (IBT_NOT_SUPPORTED);
2759 }
2760
2761 /* Physical Register Memory Region */
2762 /*
2763 * tavor_ci_register_physical_mr()
2764 */
2765 /* ARGSUSED */
2766 static ibt_status_t
2767 tavor_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2768 ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2769 ibt_pmr_desc_t *mem_desc_p)
2770 {
2771 return (IBT_NOT_SUPPORTED);
2772 }
2773
2774 /*
2775 * tavor_ci_reregister_physical_mr()
2776 */
2777 /* ARGSUSED */
2778 static ibt_status_t
2779 tavor_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2780 ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2781 ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2782 {
2783 return (IBT_NOT_SUPPORTED);
2784 }
2785
2786 /* Mellanox FMR Support */
2787 /*
2788 * tavor_ci_create_fmr_pool()
2789 * Creates a pool of memory regions suitable for FMR registration
2790 * Context: Can be called from base context only
2791 */
2792 /* ARGSUSED */
2793 static ibt_status_t
2794 tavor_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2795 ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2796 {
2797 return (IBT_NOT_SUPPORTED);
2798 }
2799
2800 /*
2801 * tavor_ci_destroy_fmr_pool()
2802 * Free all resources associated with an FMR pool.
2803 * Context: Can be called from base context only.
2804 */
2805 /* ARGSUSED */
2806 static ibt_status_t
2807 tavor_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2808 {
2809 return (IBT_NOT_SUPPORTED);
2810 }
2811
2812 /*
2813 * tavor_ci_flush_fmr_pool()
2814 * Force a flush of the memory tables, cleaning up used FMR resources.
2815 * Context: Can be called from interrupt or base context.
2816 */
2817 /* ARGSUSED */
2818 static ibt_status_t
2819 tavor_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2820 {
2821 return (IBT_NOT_SUPPORTED);
2822 }
2823
2824 /*
2825 * tavor_ci_register_physical_fmr()
2826 * From the 'pool' of FMR regions passed in, performs register physical
2827 * operation.
2828 * Context: Can be called from interrupt or base context.
2829 */
2830 /* ARGSUSED */
2831 static ibt_status_t
2832 tavor_ci_register_physical_fmr(ibc_hca_hdl_t hca,
2833 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
2834 void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
2835 {
2836 return (IBT_NOT_SUPPORTED);
2837 }
2838
2839 /*
2840 * tavor_ci_deregister_fmr()
2841 * Moves an FMR (specified by 'mr') to the deregistered state.
2842 * Context: Can be called from base context only.
2843 */
2844 /* ARGSUSED */
2845 static ibt_status_t
2846 tavor_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2847 {
2848 return (IBT_NOT_SUPPORTED);
2849 }
2850
2851 /*
2852 * tavor_ci_alloc_io_mem()
2853 * Allocate DMA-able memory
2854 *
2855 */
2856 ibt_status_t
2857 tavor_ci_alloc_io_mem(
2858 ibc_hca_hdl_t hca,
2859 size_t size,
2860 ibt_mr_flags_t mr_flag,
2861 caddr_t *kaddrp,
2862 ibc_mem_alloc_hdl_t *mem_alloc_hdl)
2863 {
2864 tavor_state_t *state;
2865 int status;
2866
2867 /* Check for valid HCA handle */
2868 if (hca == NULL) {
2869 return (IBT_HCA_HDL_INVALID);
2870 }
2871
2872 /* Check for valid mem_alloc_hdl handle pointer */
2873 if (mem_alloc_hdl == NULL) {
2874 return (IBT_MEM_ALLOC_HDL_INVALID);
2875 }
2876
2877 /* Grab the Tavor softstate pointer and mem handle */
2878 state = (tavor_state_t *)hca;
2879
2880 /* Allocate the DMA-able memory */
2881 status = tavor_mem_alloc(state, size, mr_flag, kaddrp,
2882 (tavor_mem_alloc_hdl_t *)mem_alloc_hdl);
2883
2884 if (status != DDI_SUCCESS) {
2885 return (status);
2886 }
2887
2888 return (IBT_SUCCESS);
2889 }
2890
2891
2892 /*
2893 * tavor_ci_free_io_mem()
2894 * Free the DMA-able memory
2895 */
2896 ibt_status_t
2897 tavor_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
2898 {
2899 tavor_mem_alloc_hdl_t memhdl;
2900
2901 /* Check for valid HCA handle */
2902 if (hca == NULL) {
2903 return (IBT_HCA_HDL_INVALID);
2904 }
2905
2906 /* Check for valid mem_alloc_hdl handle pointer */
2907 if (mem_alloc_hdl == NULL) {
2908 return (IBT_MEM_ALLOC_HDL_INVALID);
2909 }
2910
2911 memhdl = (tavor_mem_alloc_hdl_t)mem_alloc_hdl;
2912
2913 /* free the memory */
2914 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*memhdl))
2915 ddi_dma_mem_free(&memhdl->tavor_acc_hdl);
2916 ddi_dma_free_handle(&memhdl->tavor_dma_hdl);
2917
2918 kmem_free(memhdl, sizeof (*memhdl));
2919 return (IBT_SUCCESS);
2920 }
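
/*
 * Illustration: these two entry points pair up, so memory obtained from
 * tavor_ci_alloc_io_mem() must be returned through
 * tavor_ci_free_io_mem().  A hedged sketch:
 *
 *	caddr_t kaddr;
 *	ibc_mem_alloc_hdl_t mh;
 *
 *	if (tavor_ci_alloc_io_mem(hca, 4096, IBT_MR_NOSLEEP,
 *	    &kaddr, &mh) == IBT_SUCCESS) {
 *		// ... use the 4 KB of DMA-able memory at kaddr ...
 *		(void) tavor_ci_free_io_mem(hca, mh);
 *	}
 */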
2921
2922
2923 int
2924 tavor_mem_alloc(
2925 tavor_state_t *state,
2926 size_t size,
2927 ibt_mr_flags_t flags,
2928 caddr_t *kaddrp,
2929 tavor_mem_alloc_hdl_t *mem_hdl)
2930 {
2931 ddi_dma_handle_t dma_hdl;
2932 ddi_dma_attr_t dma_attr;
2933 ddi_acc_handle_t acc_hdl;
2934 size_t real_len;
2935 int status;
2936 int (*ddi_cb)(caddr_t);
2937
2938 tavor_dma_attr_init(&dma_attr);
2939
2940 ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
2941
2942 /* Allocate a DMA handle */
2943 status = ddi_dma_alloc_handle(state->ts_dip, &dma_attr, ddi_cb,
2944 NULL, &dma_hdl);
2945 if (status != DDI_SUCCESS) {
2946 return (DDI_FAILURE);
2947 }
2948
2949 /* Allocate DMA memory */
2950 status = ddi_dma_mem_alloc(dma_hdl, size,
2951 &state->ts_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
2952 NULL,
2953 kaddrp, &real_len, &acc_hdl);
2954 if (status != DDI_SUCCESS) {
2955 ddi_dma_free_handle(&dma_hdl);
2956 return (DDI_FAILURE);
2957 }
2958
2959 /* Package the tavor_dma_info contents and return */
2960 *mem_hdl = kmem_alloc(sizeof (**mem_hdl),
2961 flags & IBT_MR_NOSLEEP ? KM_NOSLEEP : KM_SLEEP);
2962 if (*mem_hdl == NULL) {
2963 ddi_dma_mem_free(&acc_hdl);
2964 ddi_dma_free_handle(&dma_hdl);
2965 return (DDI_FAILURE);
2966 }
2967 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(**mem_hdl))
2968 (*mem_hdl)->tavor_dma_hdl = dma_hdl;
2969 (*mem_hdl)->tavor_acc_hdl = acc_hdl;
2970
2971 return (DDI_SUCCESS);
2972 }
2973