1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * hermon_ci.c
28 * Hermon Channel Interface (CI) Routines
29 *
30 * Implements all the routines necessary to interface with the IBTF.
31 * Pointers to all of these functions are passed to the IBTF at attach()
32 * time in the ibc_operations_t structure. These functions include all
33 * of the necessary routines to implement the required InfiniBand "verbs"
34 * and additional IBTF-specific interfaces.
35 */
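
/*
 * Illustration added for clarity (not original driver code): a minimal
 * sketch of how this ops vector reaches the IBTF at attach time.  The
 * state-field names (hs_ibtfinfo, hs_ibtfpriv) are assumptions based on
 * the hermon.c attach path; only ibc_attach() itself is part of the
 * documented CI contract.
 *
 *	state->hs_ibtfinfo.hca_ci_vers = IBCI_V4;
 *	state->hs_ibtfinfo.hca_ops = &hermon_ibc_ops;
 *	if (ibc_attach(&state->hs_ibtfpriv, &state->hs_ibtfinfo) !=
 *	    IBC_SUCCESS)
 *		... fail the attach ...
 */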
36
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41
42 #include <sys/ib/adapters/hermon/hermon.h>
43
44 extern uint32_t hermon_kernel_data_ro;
45 extern uint32_t hermon_user_data_ro;
46
47 /* HCA and port related operations */
48 static ibt_status_t hermon_ci_query_hca_ports(ibc_hca_hdl_t, uint8_t,
49 ibt_hca_portinfo_t *);
50 static ibt_status_t hermon_ci_modify_ports(ibc_hca_hdl_t, uint8_t,
51 ibt_port_modify_flags_t, uint8_t);
52 static ibt_status_t hermon_ci_modify_system_image(ibc_hca_hdl_t, ib_guid_t);
53
54 /* Protection Domains */
55 static ibt_status_t hermon_ci_alloc_pd(ibc_hca_hdl_t, ibt_pd_flags_t,
56 ibc_pd_hdl_t *);
57 static ibt_status_t hermon_ci_free_pd(ibc_hca_hdl_t, ibc_pd_hdl_t);
58
59 /* Reliable Datagram Domains */
60 static ibt_status_t hermon_ci_alloc_rdd(ibc_hca_hdl_t, ibc_rdd_flags_t,
61 ibc_rdd_hdl_t *);
62 static ibt_status_t hermon_ci_free_rdd(ibc_hca_hdl_t, ibc_rdd_hdl_t);
63
64 /* Address Handles */
65 static ibt_status_t hermon_ci_alloc_ah(ibc_hca_hdl_t, ibt_ah_flags_t,
66 ibc_pd_hdl_t, ibt_adds_vect_t *, ibc_ah_hdl_t *);
67 static ibt_status_t hermon_ci_free_ah(ibc_hca_hdl_t, ibc_ah_hdl_t);
68 static ibt_status_t hermon_ci_query_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
69 ibc_pd_hdl_t *, ibt_adds_vect_t *);
70 static ibt_status_t hermon_ci_modify_ah(ibc_hca_hdl_t, ibc_ah_hdl_t,
71 ibt_adds_vect_t *);
72
73 /* Queue Pairs */
74 static ibt_status_t hermon_ci_alloc_qp(ibc_hca_hdl_t, ibtl_qp_hdl_t,
75 ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *, ib_qpn_t *,
76 ibc_qp_hdl_t *);
77 static ibt_status_t hermon_ci_alloc_special_qp(ibc_hca_hdl_t, uint8_t,
78 ibtl_qp_hdl_t, ibt_sqp_type_t, ibt_qp_alloc_attr_t *,
79 ibt_chan_sizes_t *, ibc_qp_hdl_t *);
80 static ibt_status_t hermon_ci_alloc_qp_range(ibc_hca_hdl_t, uint_t,
81 ibtl_qp_hdl_t *, ibt_qp_type_t, ibt_qp_alloc_attr_t *, ibt_chan_sizes_t *,
82 ibc_cq_hdl_t *, ibc_cq_hdl_t *, ib_qpn_t *, ibc_qp_hdl_t *);
83 static ibt_status_t hermon_ci_free_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
84 ibc_free_qp_flags_t, ibc_qpn_hdl_t *);
85 static ibt_status_t hermon_ci_release_qpn(ibc_hca_hdl_t, ibc_qpn_hdl_t);
86 static ibt_status_t hermon_ci_query_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
87 ibt_qp_query_attr_t *);
88 static ibt_status_t hermon_ci_modify_qp(ibc_hca_hdl_t, ibc_qp_hdl_t,
89 ibt_cep_modify_flags_t, ibt_qp_info_t *, ibt_queue_sizes_t *);
90
91 /* Completion Queues */
92 static ibt_status_t hermon_ci_alloc_cq(ibc_hca_hdl_t, ibt_cq_hdl_t,
93 ibt_cq_attr_t *, ibc_cq_hdl_t *, uint_t *);
94 static ibt_status_t hermon_ci_free_cq(ibc_hca_hdl_t, ibc_cq_hdl_t);
95 static ibt_status_t hermon_ci_query_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
96 uint_t *, uint_t *, uint_t *, ibt_cq_handler_id_t *);
97 static ibt_status_t hermon_ci_resize_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
98 uint_t, uint_t *);
99 static ibt_status_t hermon_ci_modify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
100 uint_t, uint_t, ibt_cq_handler_id_t);
101 static ibt_status_t hermon_ci_alloc_cq_sched(ibc_hca_hdl_t,
102 ibt_cq_sched_attr_t *, ibc_sched_hdl_t *);
103 static ibt_status_t hermon_ci_free_cq_sched(ibc_hca_hdl_t, ibc_sched_hdl_t);
104 static ibt_status_t hermon_ci_query_cq_handler_id(ibc_hca_hdl_t,
105 ibt_cq_handler_id_t, ibt_cq_handler_attr_t *);
106
107 /* EE Contexts */
108 static ibt_status_t hermon_ci_alloc_eec(ibc_hca_hdl_t, ibc_eec_flags_t,
109 ibt_eec_hdl_t, ibc_rdd_hdl_t, ibc_eec_hdl_t *);
110 static ibt_status_t hermon_ci_free_eec(ibc_hca_hdl_t, ibc_eec_hdl_t);
111 static ibt_status_t hermon_ci_query_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
112 ibt_eec_query_attr_t *);
113 static ibt_status_t hermon_ci_modify_eec(ibc_hca_hdl_t, ibc_eec_hdl_t,
114 ibt_cep_modify_flags_t, ibt_eec_info_t *);
115
116 /* Memory Registration */
117 static ibt_status_t hermon_ci_register_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
118 ibt_mr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
119 static ibt_status_t hermon_ci_register_buf(ibc_hca_hdl_t, ibc_pd_hdl_t,
120 ibt_smr_attr_t *, struct buf *, void *, ibt_mr_hdl_t *, ibt_mr_desc_t *);
121 static ibt_status_t hermon_ci_register_shared_mr(ibc_hca_hdl_t,
122 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_smr_attr_t *, void *,
123 ibc_mr_hdl_t *, ibt_mr_desc_t *);
124 static ibt_status_t hermon_ci_deregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t);
125 static ibt_status_t hermon_ci_query_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
126 ibt_mr_query_attr_t *);
127 static ibt_status_t hermon_ci_reregister_mr(ibc_hca_hdl_t, ibc_mr_hdl_t,
128 ibc_pd_hdl_t, ibt_mr_attr_t *, void *, ibc_mr_hdl_t *,
129 ibt_mr_desc_t *);
130 static ibt_status_t hermon_ci_reregister_buf(ibc_hca_hdl_t, ibc_mr_hdl_t,
131 ibc_pd_hdl_t, ibt_smr_attr_t *, struct buf *, void *, ibc_mr_hdl_t *,
132 ibt_mr_desc_t *);
133 static ibt_status_t hermon_ci_sync_mr(ibc_hca_hdl_t, ibt_mr_sync_t *, size_t);
134 static ibt_status_t hermon_ci_register_dma_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
135 ibt_dmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_mr_desc_t *);
136
137 /* Memory Windows */
138 static ibt_status_t hermon_ci_alloc_mw(ibc_hca_hdl_t, ibc_pd_hdl_t,
139 ibt_mw_flags_t, ibc_mw_hdl_t *, ibt_rkey_t *);
140 static ibt_status_t hermon_ci_free_mw(ibc_hca_hdl_t, ibc_mw_hdl_t);
141 static ibt_status_t hermon_ci_query_mw(ibc_hca_hdl_t, ibc_mw_hdl_t,
142 ibt_mw_query_attr_t *);
143
144 /* Multicast Groups */
145 static ibt_status_t hermon_ci_attach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
146 ib_gid_t, ib_lid_t);
147 static ibt_status_t hermon_ci_detach_mcg(ibc_hca_hdl_t, ibc_qp_hdl_t,
148 ib_gid_t, ib_lid_t);
149
150 /* Work Request and Completion Processing */
151 static ibt_status_t hermon_ci_post_send(ibc_hca_hdl_t, ibc_qp_hdl_t,
152 ibt_send_wr_t *, uint_t, uint_t *);
153 static ibt_status_t hermon_ci_post_recv(ibc_hca_hdl_t, ibc_qp_hdl_t,
154 ibt_recv_wr_t *, uint_t, uint_t *);
155 static ibt_status_t hermon_ci_poll_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
156 ibt_wc_t *, uint_t, uint_t *);
157 static ibt_status_t hermon_ci_notify_cq(ibc_hca_hdl_t, ibc_cq_hdl_t,
158 ibt_cq_notify_flags_t);
159
160 /* CI Object Private Data */
161 static ibt_status_t hermon_ci_ci_data_in(ibc_hca_hdl_t, ibt_ci_data_flags_t,
162 ibt_object_type_t, void *, void *, size_t);
163
164 /* CI Object Private Data */
165 static ibt_status_t hermon_ci_ci_data_out(ibc_hca_hdl_t, ibt_ci_data_flags_t,
166 ibt_object_type_t, void *, void *, size_t);
167
168 /* Shared Receive Queues */
169 static ibt_status_t hermon_ci_alloc_srq(ibc_hca_hdl_t, ibt_srq_flags_t,
170 ibt_srq_hdl_t, ibc_pd_hdl_t, ibt_srq_sizes_t *, ibc_srq_hdl_t *,
171 ibt_srq_sizes_t *);
172 static ibt_status_t hermon_ci_free_srq(ibc_hca_hdl_t, ibc_srq_hdl_t);
173 static ibt_status_t hermon_ci_query_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
174 ibc_pd_hdl_t *, ibt_srq_sizes_t *, uint_t *);
175 static ibt_status_t hermon_ci_modify_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
176 ibt_srq_modify_flags_t, uint_t, uint_t, uint_t *);
177 static ibt_status_t hermon_ci_post_srq(ibc_hca_hdl_t, ibc_srq_hdl_t,
178 ibt_recv_wr_t *, uint_t, uint_t *);
179
180 /* Address translation */
181 static ibt_status_t hermon_ci_map_mem_area(ibc_hca_hdl_t, ibt_va_attr_t *,
182 void *, uint_t, ibt_reg_req_t *, ibc_ma_hdl_t *);
183 static ibt_status_t hermon_ci_unmap_mem_area(ibc_hca_hdl_t, ibc_ma_hdl_t);
184 static ibt_status_t hermon_ci_map_mem_iov(ibc_hca_hdl_t, ibt_iov_attr_t *,
185 ibt_all_wr_t *, ibc_mi_hdl_t *);
186 static ibt_status_t hermon_ci_unmap_mem_iov(ibc_hca_hdl_t, ibc_mi_hdl_t);
187
188 /* Allocate L_Key */
189 static ibt_status_t hermon_ci_alloc_lkey(ibc_hca_hdl_t, ibc_pd_hdl_t,
190 ibt_lkey_flags_t, uint_t, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
191
192 /* Physical Register Memory Region */
193 static ibt_status_t hermon_ci_register_physical_mr(ibc_hca_hdl_t, ibc_pd_hdl_t,
194 ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *, ibt_pmr_desc_t *);
195 static ibt_status_t hermon_ci_reregister_physical_mr(ibc_hca_hdl_t,
196 ibc_mr_hdl_t, ibc_pd_hdl_t, ibt_pmr_attr_t *, void *, ibc_mr_hdl_t *,
197 ibt_pmr_desc_t *);
198
199 /* Mellanox FMR */
200 static ibt_status_t hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca,
201 ibc_pd_hdl_t pd, ibt_fmr_pool_attr_t *fmr_params,
202 ibc_fmr_pool_hdl_t *fmr_pool);
203 static ibt_status_t hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca,
204 ibc_fmr_pool_hdl_t fmr_pool);
205 static ibt_status_t hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca,
206 ibc_fmr_pool_hdl_t fmr_pool);
207 static ibt_status_t hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
208 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
209 void *ibtl_reserved, ibc_mr_hdl_t *mr_hdl_p, ibt_pmr_desc_t *mem_desc_p);
210 static ibt_status_t hermon_ci_deregister_fmr(ibc_hca_hdl_t hca,
211 ibc_mr_hdl_t mr);
212
213 /* Memory Allocation/Deallocation */
214 static ibt_status_t hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size,
215 ibt_mr_flags_t mr_flag, caddr_t *kaddrp,
216 ibc_mem_alloc_hdl_t *mem_alloc_hdl_p);
217 static ibt_status_t hermon_ci_free_io_mem(ibc_hca_hdl_t hca,
218 ibc_mem_alloc_hdl_t mem_alloc_hdl);
219 static ibt_status_t hermon_ci_not_supported();
220
221 /*
222 * This ibc_operations_t structure includes pointers to all the entry points
223 * provided by the Hermon driver. This structure is passed to the IBTF at
224 * driver attach time, using the ibc_attach() call.
225 */
226 ibc_operations_t hermon_ibc_ops = {
227 /* HCA and port related operations */
228 hermon_ci_query_hca_ports,
229 hermon_ci_modify_ports,
230 hermon_ci_modify_system_image,
231
232 /* Protection Domains */
233 hermon_ci_alloc_pd,
234 hermon_ci_free_pd,
235
236 /* Reliable Datagram Domains */
237 hermon_ci_alloc_rdd,
238 hermon_ci_free_rdd,
239
240 /* Address Handles */
241 hermon_ci_alloc_ah,
242 hermon_ci_free_ah,
243 hermon_ci_query_ah,
244 hermon_ci_modify_ah,
245
246 /* Queue Pairs */
247 hermon_ci_alloc_qp,
248 hermon_ci_alloc_special_qp,
249 hermon_ci_alloc_qp_range,
250 hermon_ci_free_qp,
251 hermon_ci_release_qpn,
252 hermon_ci_query_qp,
253 hermon_ci_modify_qp,
254
255 /* Completion Queues */
256 hermon_ci_alloc_cq,
257 hermon_ci_free_cq,
258 hermon_ci_query_cq,
259 hermon_ci_resize_cq,
260 hermon_ci_modify_cq,
261 hermon_ci_alloc_cq_sched,
262 hermon_ci_free_cq_sched,
263 hermon_ci_query_cq_handler_id,
264
265 /* EE Contexts */
266 hermon_ci_alloc_eec,
267 hermon_ci_free_eec,
268 hermon_ci_query_eec,
269 hermon_ci_modify_eec,
270
271 /* Memory Registration */
272 hermon_ci_register_mr,
273 hermon_ci_register_buf,
274 hermon_ci_register_shared_mr,
275 hermon_ci_deregister_mr,
276 hermon_ci_query_mr,
277 hermon_ci_reregister_mr,
278 hermon_ci_reregister_buf,
279 hermon_ci_sync_mr,
280
281 /* Memory Windows */
282 hermon_ci_alloc_mw,
283 hermon_ci_free_mw,
284 hermon_ci_query_mw,
285
286 /* Multicast Groups */
287 hermon_ci_attach_mcg,
288 hermon_ci_detach_mcg,
289
290 /* Work Request and Completion Processing */
291 hermon_ci_post_send,
292 hermon_ci_post_recv,
293 hermon_ci_poll_cq,
294 hermon_ci_notify_cq,
295
296 /* CI Object Mapping Data */
297 hermon_ci_ci_data_in,
298 hermon_ci_ci_data_out,
299
300 /* Shared Receive Queue */
301 hermon_ci_alloc_srq,
302 hermon_ci_free_srq,
303 hermon_ci_query_srq,
304 hermon_ci_modify_srq,
305 hermon_ci_post_srq,
306
307 /* Address translation */
308 hermon_ci_map_mem_area,
309 hermon_ci_unmap_mem_area,
310 hermon_ci_map_mem_iov,
311 hermon_ci_unmap_mem_iov,
312
313 /* Allocate L_key */
314 hermon_ci_alloc_lkey,
315
316 /* Physical Register Memory Region */
317 hermon_ci_register_physical_mr,
318 hermon_ci_reregister_physical_mr,
319
320 /* Mellanox FMR */
321 hermon_ci_create_fmr_pool,
322 hermon_ci_destroy_fmr_pool,
323 hermon_ci_flush_fmr_pool,
324 hermon_ci_register_physical_fmr,
325 hermon_ci_deregister_fmr,
326
327 /* Memory allocation */
328 hermon_ci_alloc_io_mem,
329 hermon_ci_free_io_mem,
330
331 /* XRC not yet supported */
332 hermon_ci_not_supported, /* ibc_alloc_xrc_domain */
333 hermon_ci_not_supported, /* ibc_free_xrc_domain */
334 hermon_ci_not_supported, /* ibc_alloc_xrc_srq */
335 hermon_ci_not_supported, /* ibc_free_xrc_srq */
336 hermon_ci_not_supported, /* ibc_query_xrc_srq */
337 hermon_ci_not_supported, /* ibc_modify_xrc_srq */
338 hermon_ci_not_supported, /* ibc_alloc_xrc_tgt_qp */
339 hermon_ci_not_supported, /* ibc_free_xrc_tgt_qp */
340 hermon_ci_not_supported, /* ibc_query_xrc_tgt_qp */
341 hermon_ci_not_supported, /* ibc_modify_xrc_tgt_qp */
342
343 /* Memory Region (physical) */
344 hermon_ci_register_dma_mr,
345
346 /* Next enhancements */
347 hermon_ci_not_supported, /* ibc_enhancement1 */
348 hermon_ci_not_supported, /* ibc_enhancement2 */
349 hermon_ci_not_supported, /* ibc_enhancement3 */
350 hermon_ci_not_supported, /* ibc_enhancement4 */
351 };
352
353 /*
354 * Not yet implemented OPS
355 */
356 /* ARGSUSED */
357 static ibt_status_t
358 hermon_ci_not_supported()
359 {
360 return (IBT_NOT_SUPPORTED);
361 }
362
363
364 /*
365 * hermon_ci_query_hca_ports()
366 * Returns HCA port attributes for either one or all of the HCA's ports.
367 * Context: Can be called only from user or kernel context.
368 */
369 static ibt_status_t
370 hermon_ci_query_hca_ports(ibc_hca_hdl_t hca, uint8_t query_port,
371 ibt_hca_portinfo_t *info_p)
372 {
373 hermon_state_t *state;
374 uint_t start, end, port;
375 int status, indx;
376
377 /* Grab the Hermon softstate pointer */
378 state = (hermon_state_t *)hca;
379
380 /*
381 * If the specified port is zero, then we are supposed to query all
382 * ports. Otherwise, we query only the port number specified.
383 * Set up the start and end port numbers as appropriate for the loop
384 * below. Note: The first Hermon port is port number one (1).
385 */
386 if (query_port == 0) {
387 start = 1;
388 end = start + (state->hs_cfg_profile->cp_num_ports - 1);
389 } else {
390 end = start = query_port;
391 }
392
393 /* Query the port(s) */
394 for (port = start, indx = 0; port <= end; port++, indx++) {
395 status = hermon_port_query(state, port, &info_p[indx]);
396 if (status != DDI_SUCCESS) {
397 return (status);
398 }
399 }
400 return (IBT_SUCCESS);
401 }
402
403
404 /*
405 * hermon_ci_modify_ports()
406 * Modify HCA port attributes
407 * Context: Can be called only from user or kernel context.
408 */
409 static ibt_status_t
410 hermon_ci_modify_ports(ibc_hca_hdl_t hca, uint8_t port,
411 ibt_port_modify_flags_t flags, uint8_t init_type)
412 {
413 hermon_state_t *state;
414 int status;
415
416 /* Grab the Hermon softstate pointer */
417 state = (hermon_state_t *)hca;
418
419 /* Modify the port(s) */
420 status = hermon_port_modify(state, port, flags, init_type);
421 return (status);
422 }
423
424 /*
425 * hermon_ci_modify_system_image()
426 * Modify the System Image GUID
427 * Context: Can be called only from user or kernel context.
428 */
429 /* ARGSUSED */
430 static ibt_status_t
431 hermon_ci_modify_system_image(ibc_hca_hdl_t hca, ib_guid_t sys_guid)
432 {
433 /*
434 * This is an unsupported interface for the Hermon driver. This
435 * interface is necessary to support modification of the System
436 * Image GUID. Hermon is only capable of modifying this parameter
437 * once (during driver initialization).
438 */
439 return (IBT_NOT_SUPPORTED);
440 }
441
442 /*
443 * hermon_ci_alloc_pd()
444 * Allocate a Protection Domain
445 * Context: Can be called only from user or kernel context.
446 */
447 /* ARGSUSED */
448 static ibt_status_t
449 hermon_ci_alloc_pd(ibc_hca_hdl_t hca, ibt_pd_flags_t flags, ibc_pd_hdl_t *pd_p)
450 {
451 hermon_state_t *state;
452 hermon_pdhdl_t pdhdl;
453 int status;
454
455 ASSERT(pd_p != NULL);
456
457 /* Grab the Hermon softstate pointer */
458 state = (hermon_state_t *)hca;
459
460 /* Allocate the PD */
461 status = hermon_pd_alloc(state, &pdhdl, HERMON_NOSLEEP);
462 if (status != DDI_SUCCESS) {
463 return (status);
464 }
465
466 /* Return the Hermon PD handle */
467 *pd_p = (ibc_pd_hdl_t)pdhdl;
468
469 return (IBT_SUCCESS);
470 }
471
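/*
 * A hedged client-side sketch (illustrative, not driver code): kernel
 * IBTF clients reach the PD entry points above through ibt_alloc_pd(9F)
 * and ibt_free_pd(9F).
 *
 *	ibt_pd_hdl_t	pd_hdl;
 *
 *	if (ibt_alloc_pd(hca_hdl, IBT_PD_NO_FLAGS, &pd_hdl) != IBT_SUCCESS)
 *		... handle the failure ...
 *	...
 *	(void) ibt_free_pd(hca_hdl, pd_hdl);
 */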
472
473 /*
474 * hermon_ci_free_pd()
475 * Free a Protection Domain
476 * Context: Can be called only from user or kernel context
477 */
478 static ibt_status_t
479 hermon_ci_free_pd(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd)
480 {
481 hermon_state_t *state;
482 hermon_pdhdl_t pdhdl;
483 int status;
484
485 /* Grab the Hermon softstate pointer and PD handle */
486 state = (hermon_state_t *)hca;
487 pdhdl = (hermon_pdhdl_t)pd;
488
489 /* Free the PD */
490 status = hermon_pd_free(state, &pdhdl);
491 return (status);
492 }
493
494
495 /*
496 * hermon_ci_alloc_rdd()
497 * Allocate a Reliable Datagram Domain
498 * Context: Can be called only from user or kernel context.
499 */
500 /* ARGSUSED */
501 static ibt_status_t
502 hermon_ci_alloc_rdd(ibc_hca_hdl_t hca, ibc_rdd_flags_t flags,
503 ibc_rdd_hdl_t *rdd_p)
504 {
505 /*
506 * This is an unsupported interface for the Hermon driver. This
507 * interface is necessary to support Reliable Datagram (RD)
508 * operations. Hermon does not support RD.
509 */
510 return (IBT_NOT_SUPPORTED);
511 }
512
513
514 /*
515 * hermon_ci_free_rdd()
516 * Free a Reliable Datagram Domain
517 * Context: Can be called only from user or kernel context.
518 */
519 /* ARGSUSED */
520 static ibt_status_t
521 hermon_ci_free_rdd(ibc_hca_hdl_t hca, ibc_rdd_hdl_t rdd)
522 {
523 /*
524 * This is an unsupported interface for the Hermon driver. This
525 * interface is necessary to support Reliable Datagram (RD)
526 * operations. Hermon does not support RD.
527 */
528 return (IBT_NOT_SUPPORTED);
529 }
530
531
532 /*
533 * hermon_ci_alloc_ah()
534 * Allocate an Address Handle
535 * Context: Can be called only from user or kernel context.
536 */
537 /* ARGSUSED */
538 static ibt_status_t
539 hermon_ci_alloc_ah(ibc_hca_hdl_t hca, ibt_ah_flags_t flags, ibc_pd_hdl_t pd,
540 ibt_adds_vect_t *attr_p, ibc_ah_hdl_t *ah_p)
541 {
542 hermon_state_t *state;
543 hermon_ahhdl_t ahhdl;
544 hermon_pdhdl_t pdhdl;
545 int status;
546
547 /* Grab the Hermon softstate pointer and PD handle */
548 state = (hermon_state_t *)hca;
549 pdhdl = (hermon_pdhdl_t)pd;
550
551 /* Allocate the AH */
552 status = hermon_ah_alloc(state, pdhdl, attr_p, &ahhdl, HERMON_NOSLEEP);
553 if (status != DDI_SUCCESS) {
554 return (status);
555 }
556
557 /* Return the Hermon AH handle */
558 *ah_p = (ibc_ah_hdl_t)ahhdl;
559
560 return (IBT_SUCCESS);
561 }
562
563
564 /*
565 * hermon_ci_free_ah()
566 * Free an Address Handle
567 * Context: Can be called only from user or kernel context.
568 */
569 static ibt_status_t
570 hermon_ci_free_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah)
571 {
572 hermon_state_t *state;
573 hermon_ahhdl_t ahhdl;
574 int status;
575
576 /* Grab the Hermon softstate pointer and AH handle */
577 state = (hermon_state_t *)hca;
578 ahhdl = (hermon_ahhdl_t)ah;
579
580 /* Free the AH */
581 status = hermon_ah_free(state, &ahhdl, HERMON_NOSLEEP);
582
583 return (status);
584 }
585
586
587 /*
588 * hermon_ci_query_ah()
589 * Return the Address Vector information for a specified Address Handle
590 * Context: Can be called from interrupt or base context.
591 */
592 static ibt_status_t
593 hermon_ci_query_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibc_pd_hdl_t *pd_p,
594 ibt_adds_vect_t *attr_p)
595 {
596 hermon_state_t *state;
597 hermon_ahhdl_t ahhdl;
598 hermon_pdhdl_t pdhdl;
599 int status;
600
601 /* Grab the Hermon softstate pointer and AH handle */
602 state = (hermon_state_t *)hca;
603 ahhdl = (hermon_ahhdl_t)ah;
604
605 /* Query the AH */
606 status = hermon_ah_query(state, ahhdl, &pdhdl, attr_p);
607 if (status != DDI_SUCCESS) {
608 return (status);
609 }
610
611 /* Return the Hermon PD handle */
612 *pd_p = (ibc_pd_hdl_t)pdhdl;
613
614 return (IBT_SUCCESS);
615 }
616
617
618 /*
619 * hermon_ci_modify_ah()
620 * Modify the Address Vector information of a specified Address Handle
621 * Context: Can be called from interrupt or base context.
622 */
623 static ibt_status_t
624 hermon_ci_modify_ah(ibc_hca_hdl_t hca, ibc_ah_hdl_t ah, ibt_adds_vect_t *attr_p)
625 {
626 hermon_state_t *state;
627 hermon_ahhdl_t ahhdl;
628 int status;
629
630 /* Grab the Hermon softstate pointer and AH handle */
631 state = (hermon_state_t *)hca;
632 ahhdl = (hermon_ahhdl_t)ah;
633
634 /* Modify the AH */
635 status = hermon_ah_modify(state, ahhdl, attr_p);
636
637 return (status);
638 }
639
640
641 /*
642 * hermon_ci_alloc_qp()
643 * Allocate a Queue Pair
644 * Context: Can be called only from user or kernel context.
645 */
646 static ibt_status_t
647 hermon_ci_alloc_qp(ibc_hca_hdl_t hca, ibtl_qp_hdl_t ibt_qphdl,
648 ibt_qp_type_t type, ibt_qp_alloc_attr_t *attr_p,
649 ibt_chan_sizes_t *queue_sizes_p, ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
650 {
651 hermon_state_t *state;
652 hermon_qp_info_t qpinfo;
653 int status;
654
655 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
656 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
657
658 /* Grab the Hermon softstate pointer */
659 state = (hermon_state_t *)hca;
660
661 /* Allocate the QP */
662 qpinfo.qpi_attrp = attr_p;
663 qpinfo.qpi_type = type;
664 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
665 qpinfo.qpi_queueszp = queue_sizes_p;
666 qpinfo.qpi_qpn = qpn;
667 status = hermon_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
668 if (status != DDI_SUCCESS) {
669 return (status);
670 }
671
672 /* Return the Hermon QP handle */
673 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
674
675 return (IBT_SUCCESS);
676 }
677
678
679 /*
680 * hermon_ci_alloc_special_qp()
681 * Allocate a Special Queue Pair
682 * Context: Can be called only from user or kernel context.
683 */
684 static ibt_status_t
685 hermon_ci_alloc_special_qp(ibc_hca_hdl_t hca, uint8_t port,
686 ibtl_qp_hdl_t ibt_qphdl, ibt_sqp_type_t type,
687 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
688 ibc_qp_hdl_t *qp_p)
689 {
690 hermon_state_t *state;
691 hermon_qp_info_t qpinfo;
692 int status;
693
694 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
695 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
696
697 /* Grab the Hermon softstate pointer */
698 state = (hermon_state_t *)hca;
699
700 /* Allocate the Special QP */
701 qpinfo.qpi_attrp = attr_p;
702 qpinfo.qpi_type = type;
703 qpinfo.qpi_port = port;
704 qpinfo.qpi_ibt_qphdl = ibt_qphdl;
705 qpinfo.qpi_queueszp = queue_sizes_p;
706 status = hermon_special_qp_alloc(state, &qpinfo, HERMON_NOSLEEP);
707 if (status != DDI_SUCCESS) {
708 return (status);
709 }
710 /* Return the Hermon QP handle */
711 *qp_p = (ibc_qp_hdl_t)qpinfo.qpi_qphdl;
712
713 return (IBT_SUCCESS);
714 }
715
716 /*
717 * hermon_ci_alloc_qp_range()
718 * Allocate a range of Queue Pairs
719 * Context: Can be called only from user or kernel context.
720 */
721 /* ARGSUSED */
722 static ibt_status_t
723 hermon_ci_alloc_qp_range(ibc_hca_hdl_t hca, uint_t log2,
724 ibtl_qp_hdl_t *ibtl_qp, ibt_qp_type_t type,
725 ibt_qp_alloc_attr_t *attr_p, ibt_chan_sizes_t *queue_sizes_p,
726 ibc_cq_hdl_t *send_cq, ibc_cq_hdl_t *recv_cq,
727 ib_qpn_t *qpn, ibc_qp_hdl_t *qp_p)
728 {
729 hermon_state_t *state;
730 hermon_qp_info_t qpinfo;
731 int status;
732
733 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr_p))
734 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*queue_sizes_p))
735
736 /* Grab the Hermon softstate pointer */
737 state = (hermon_state_t *)hca;
738
739 /* Allocate the QP */
740 qpinfo.qpi_attrp = attr_p;
741 qpinfo.qpi_type = type;
742 qpinfo.qpi_queueszp = queue_sizes_p;
743 qpinfo.qpi_qpn = qpn;
744 status = hermon_qp_alloc_range(state, log2, &qpinfo, ibtl_qp,
745 send_cq, recv_cq, (hermon_qphdl_t *)qp_p, HERMON_NOSLEEP);
746 return (status);
747 }
748
749 /*
750 * hermon_ci_free_qp()
751 * Free a Queue Pair
752 * Context: Can be called only from user or kernel context.
753 */
754 static ibt_status_t
755 hermon_ci_free_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
756 ibc_free_qp_flags_t free_qp_flags, ibc_qpn_hdl_t *qpnh_p)
757 {
758 hermon_state_t *state;
759 hermon_qphdl_t qphdl;
760 int status;
761
762 /* Grab the Hermon softstate pointer and QP handle */
763 state = (hermon_state_t *)hca;
764 qphdl = (hermon_qphdl_t)qp;
765
766 /* Free the QP */
767 status = hermon_qp_free(state, &qphdl, free_qp_flags, qpnh_p,
768 HERMON_NOSLEEP);
769
770 return (status);
771 }
772
773
774 /*
775 * hermon_ci_release_qpn()
776 * Release a Queue Pair Number (QPN)
777 * Context: Can be called only from user or kernel context.
778 */
779 static ibt_status_t
780 hermon_ci_release_qpn(ibc_hca_hdl_t hca, ibc_qpn_hdl_t qpnh)
781 {
782 hermon_state_t *state;
783 hermon_qpn_entry_t *entry;
784
785 /* Grab the Hermon softstate pointer and QP handle */
786 state = (hermon_state_t *)hca;
787 entry = (hermon_qpn_entry_t *)qpnh;
788
789 /* Release the QP number */
790 hermon_qp_release_qpn(state, entry, HERMON_QPN_RELEASE);
791
792 return (IBT_SUCCESS);
793 }
794
795
796 /*
797 * hermon_ci_query_qp()
798 * Query a Queue Pair
799 * Context: Can be called from interrupt or base context.
800 */
801 static ibt_status_t
802 hermon_ci_query_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
803 ibt_qp_query_attr_t *attr_p)
804 {
805 hermon_state_t *state;
806 hermon_qphdl_t qphdl;
807 int status;
808
809 /* Grab the Hermon softstate pointer and QP handle */
810 state = (hermon_state_t *)hca;
811 qphdl = (hermon_qphdl_t)qp;
812
813 /* Query the QP */
814 status = hermon_qp_query(state, qphdl, attr_p);
815 return (status);
816 }
817
818
819 /*
820 * hermon_ci_modify_qp()
821 * Modify a Queue Pair
822 * Context: Can be called from interrupt or base context.
823 */
824 static ibt_status_t
825 hermon_ci_modify_qp(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp,
826 ibt_cep_modify_flags_t flags, ibt_qp_info_t *info_p,
827 ibt_queue_sizes_t *actual_sz)
828 {
829 hermon_state_t *state;
830 hermon_qphdl_t qphdl;
831 int status;
832
833 /* Grab the Hermon softstate pointer and QP handle */
834 state = (hermon_state_t *)hca;
835 qphdl = (hermon_qphdl_t)qp;
836
837 /* Modify the QP */
838 status = hermon_qp_modify(state, qphdl, flags, info_p, actual_sz);
839 return (status);
840 }
841
842
843 /*
844 * hermon_ci_alloc_cq()
845 * Allocate a Completion Queue
846 * Context: Can be called only from user or kernel context.
847 */
848 /* ARGSUSED */
849 static ibt_status_t
850 hermon_ci_alloc_cq(ibc_hca_hdl_t hca, ibt_cq_hdl_t ibt_cqhdl,
851 ibt_cq_attr_t *attr_p, ibc_cq_hdl_t *cq_p, uint_t *actual_size)
852 {
853 hermon_state_t *state;
854 hermon_cqhdl_t cqhdl;
855 int status;
856
857 state = (hermon_state_t *)hca;
858
859 /* Allocate the CQ */
860 status = hermon_cq_alloc(state, ibt_cqhdl, attr_p, actual_size,
861 &cqhdl, HERMON_NOSLEEP);
862 if (status != DDI_SUCCESS) {
863 return (status);
864 }
865
866 /* Return the Hermon CQ handle */
867 *cq_p = (ibc_cq_hdl_t)cqhdl;
868
869 return (IBT_SUCCESS);
870 }
871
872
873 /*
874 * hermon_ci_free_cq()
875 * Free a Completion Queue
876 * Context: Can be called only from user or kernel context.
877 */
878 static ibt_status_t
879 hermon_ci_free_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq)
880 {
881 hermon_state_t *state;
882 hermon_cqhdl_t cqhdl;
883 int status;
884
885 /* Grab the Hermon softstate pointer and CQ handle */
886 state = (hermon_state_t *)hca;
887 cqhdl = (hermon_cqhdl_t)cq;
888
889
890 /* Free the CQ */
891 status = hermon_cq_free(state, &cqhdl, HERMON_NOSLEEP);
892 return (status);
893 }
894
895
896 /*
897 * hermon_ci_query_cq()
898 * Return the size of a Completion Queue
899 * Context: Can be called only from user or kernel context.
900 */
901 static ibt_status_t
902 hermon_ci_query_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t *entries_p,
903 uint_t *count_p, uint_t *usec_p, ibt_cq_handler_id_t *hid_p)
904 {
905 hermon_state_t *state;
906 hermon_cqhdl_t cqhdl;
907
908 /* Grab the CQ handle */
909 state = (hermon_state_t *)hca;
910 cqhdl = (hermon_cqhdl_t)cq;
911
912 /* Query the current CQ size */
913 *entries_p = cqhdl->cq_bufsz;
914 *count_p = cqhdl->cq_intmod_count;
915 *usec_p = cqhdl->cq_intmod_usec;
916 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cqhdl))
917 *hid_p = HERMON_EQNUM_TO_HID(state, cqhdl->cq_eqnum);
918
919 return (IBT_SUCCESS);
920 }
921
922
923 /*
924 * hermon_ci_resize_cq()
925 * Change the size of a Completion Queue
926 * Context: Can be called only from user or kernel context.
927 */
928 static ibt_status_t
929 hermon_ci_resize_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t size,
930 uint_t *actual_size)
931 {
932 hermon_state_t *state;
933 hermon_cqhdl_t cqhdl;
934 int status;
935
936 /* Grab the Hermon softstate pointer and CQ handle */
937 state = (hermon_state_t *)hca;
938 cqhdl = (hermon_cqhdl_t)cq;
939
940 /* Resize the CQ */
941 status = hermon_cq_resize(state, cqhdl, size, actual_size,
942 HERMON_NOSLEEP);
943 if (status != DDI_SUCCESS) {
944 return (status);
945 }
946 return (IBT_SUCCESS);
947 }
948
949 /*
950 * hermon_ci_modify_cq()
951 * Change the interrupt moderation values of a Completion Queue
952 * Context: Can be called only from user or kernel context.
953 */
954 static ibt_status_t
955 hermon_ci_modify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, uint_t count,
956 uint_t usec, ibt_cq_handler_id_t hid)
957 {
958 hermon_state_t *state;
959 hermon_cqhdl_t cqhdl;
960 int status;
961
962 /* Grab the Hermon softstate pointer and CQ handle */
963 state = (hermon_state_t *)hca;
964 cqhdl = (hermon_cqhdl_t)cq;
965
966 /* Modify the CQ interrupt moderation values */
967 status = hermon_cq_modify(state, cqhdl, count, usec, hid,
968 HERMON_NOSLEEP);
969 return (status);
970 }
971
972
973 /*
974 * hermon_ci_alloc_cq_sched()
975 * Reserve a CQ scheduling class resource
976 * Context: Can be called only from user or kernel context.
977 */
978 /* ARGSUSED */
979 static ibt_status_t
980 hermon_ci_alloc_cq_sched(ibc_hca_hdl_t hca, ibt_cq_sched_attr_t *attr,
981 ibc_sched_hdl_t *sched_hdl_p)
982 {
983 int status;
984
985 status = hermon_cq_sched_alloc((hermon_state_t *)hca, attr,
986 (hermon_cq_sched_t **)sched_hdl_p);
987 return (status);
988 }
989
990
991 /*
992 * hermon_ci_free_cq_sched()
993 * Free a CQ scheduling class resource
994 * Context: Can be called only from user or kernel context.
995 */
996 /* ARGSUSED */
997 static ibt_status_t
998 hermon_ci_free_cq_sched(ibc_hca_hdl_t hca, ibc_sched_hdl_t sched_hdl)
999 {
1000 int status;
1001
1002 status = hermon_cq_sched_free((hermon_state_t *)hca,
1003 (hermon_cq_sched_t *)sched_hdl);
1004 return (status);
1005 }
1006
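/*
 * hermon_ci_query_cq_handler_id()
 *    Return the interrupt handler attributes for a given CQ handler ID
 */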
1007 static ibt_status_t
1008 hermon_ci_query_cq_handler_id(ibc_hca_hdl_t hca,
1009 ibt_cq_handler_id_t hid, ibt_cq_handler_attr_t *attrs)
1010 {
1011 hermon_state_t *state;
1012
1013 state = (hermon_state_t *)hca;
1014 if (!HERMON_HID_VALID(state, hid))
1015 return (IBT_CQ_HID_INVALID);
1016 if (attrs == NULL)
1017 return (IBT_INVALID_PARAM);
1018 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attrs))
1019 attrs->cha_ih = state->hs_intrmsi_hdl[hid - 1];
1020 attrs->cha_dip = state->hs_dip;
1021 return (IBT_SUCCESS);
1022 }
1023
1024 /*
1025 * hermon_ci_alloc_eec()
1026 * Allocate an End-to-End context
1027 * Context: Can be called only from user or kernel context.
1028 */
1029 /* ARGSUSED */
1030 static ibt_status_t
1031 hermon_ci_alloc_eec(ibc_hca_hdl_t hca, ibc_eec_flags_t flags,
1032 ibt_eec_hdl_t ibt_eec, ibc_rdd_hdl_t rdd, ibc_eec_hdl_t *eec_p)
1033 {
1034 /*
1035 * This is an unsupported interface for the Hermon driver. This
1036 * interface is necessary to support Reliable Datagram (RD)
1037 * operations. Hermon does not support RD.
1038 */
1039 return (IBT_NOT_SUPPORTED);
1040 }
1041
1042
1043 /*
1044 * hermon_ci_free_eec()
1045 * Free an End-to-End context
1046 * Context: Can be called only from user or kernel context.
1047 */
1048 /* ARGSUSED */
1049 static ibt_status_t
1050 hermon_ci_free_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec)
1051 {
1052 /*
1053 * This is an unsupported interface for the Hermon driver. This
1054 * interface is necessary to support Reliable Datagram (RD)
1055 * operations. Hermon does not support RD.
1056 */
1057 return (IBT_NOT_SUPPORTED);
1058 }
1059
1060
1061 /*
1062 * hermon_ci_query_eec()
1063 * Query an End-to-End context
1064 * Context: Can be called from interrupt or base context.
1065 */
1066 /* ARGSUSED */
1067 static ibt_status_t
1068 hermon_ci_query_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1069 ibt_eec_query_attr_t *attr_p)
1070 {
1071 /*
1072 * This is an unsupported interface for the Hermon driver. This
1073 * interface is necessary to support Reliable Datagram (RD)
1074 * operations. Hermon does not support RD.
1075 */
1076 return (IBT_NOT_SUPPORTED);
1077 }
1078
1079
1080 /*
1081 * hermon_ci_modify_eec()
1082 * Modify an End-to-End context
1083 * Context: Can be called from interrupt or base context.
1084 */
1085 /* ARGSUSED */
1086 static ibt_status_t
1087 hermon_ci_modify_eec(ibc_hca_hdl_t hca, ibc_eec_hdl_t eec,
1088 ibt_cep_modify_flags_t flags, ibt_eec_info_t *info_p)
1089 {
1090 /*
1091 * This is an unsupported interface for the Hermon driver. This
1092 * interface is necessary to support Reliable Datagram (RD)
1093 * operations. Hermon does not support RD.
1094 */
1095 return (IBT_NOT_SUPPORTED);
1096 }
1097
1098
1099 /*
1100 * hermon_ci_register_mr()
1101 * Prepare a virtually addressed Memory Region for use by an HCA
1102 * Context: Can be called from interrupt or base context.
1103 */
1104 /* ARGSUSED */
1105 static ibt_status_t
1106 hermon_ci_register_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1107 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1108 ibt_mr_desc_t *mr_desc)
1109 {
1110 hermon_mr_options_t op;
1111 hermon_state_t *state;
1112 hermon_pdhdl_t pdhdl;
1113 hermon_mrhdl_t mrhdl;
1114 int status;
1115
1116 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1117
1118 ASSERT(mr_attr != NULL);
1119 ASSERT(mr_p != NULL);
1120 ASSERT(mr_desc != NULL);
1121
1122 /*
1123 * Validate the access flags. Both Remote Write and Remote Atomic
1124 * require the Local Write flag to be set
1125 */
1126 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1127 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1128 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1129 return (IBT_MR_ACCESS_REQ_INVALID);
1130 }
1131
1132 /* Grab the Hermon softstate pointer and PD handle */
1133 state = (hermon_state_t *)hca;
1134 pdhdl = (hermon_pdhdl_t)pd;
1135
1136 /* Register the memory region */
1137 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1138 op.mro_bind_dmahdl = NULL;
1139 op.mro_bind_override_addr = 0;
1140 status = hermon_mr_register(state, pdhdl, mr_attr, &mrhdl,
1141 &op, HERMON_MPT_DMPT);
1142 if (status != DDI_SUCCESS) {
1143 return (status);
1144 }
1145 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1146
1147 /* Fill in the mr_desc structure */
1148 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1149 mr_desc->md_lkey = mrhdl->mr_lkey;
1150 /* Only set RKey if remote access was requested */
1151 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1152 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1153 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1154 mr_desc->md_rkey = mrhdl->mr_rkey;
1155 }
1156
1157 /*
1158 * If the region is mapped for streaming (i.e. noncoherent), then
1159 * mark it as requiring an explicit sync
1160 */
1161 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1162 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1163
1164 /* Return the Hermon MR handle */
1165 *mr_p = (ibc_mr_hdl_t)mrhdl;
1166
1167 return (IBT_SUCCESS);
1168 }
1169
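/*
 * For reference, a hedged sketch of the matching client-side call,
 * ibt_register_mr(9F), that ultimately lands in the entry point above
 * (names and flag choices here are illustrative only):
 *
 *	ibt_mr_attr_t	mr_attr;
 *	ibt_mr_hdl_t	mr_hdl;
 *	ibt_mr_desc_t	mr_desc;
 *
 *	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)kaddr;
 *	mr_attr.mr_len   = len;
 *	mr_attr.mr_as    = NULL;	(kernel virtual address)
 *	mr_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *	if (ibt_register_mr(hca_hdl, pd_hdl, &mr_attr, &mr_hdl,
 *	    &mr_desc) != IBT_SUCCESS)
 *		... handle the failure ...
 */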
1170
1171 /*
1172 * hermon_ci_register_buf()
1173 * Prepare a Memory Region specified by buf structure for use by an HCA
1174 * Context: Can be called from interrupt or base context.
1175 */
1176 /* ARGSUSED */
1177 static ibt_status_t
1178 hermon_ci_register_buf(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1179 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1180 ibt_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1181 {
1182 hermon_mr_options_t op;
1183 hermon_state_t *state;
1184 hermon_pdhdl_t pdhdl;
1185 hermon_mrhdl_t mrhdl;
1186 int status;
1187 ibt_mr_flags_t flags = attrp->mr_flags;
1188
1189 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1190
1191 ASSERT(mr_p != NULL);
1192 ASSERT(mr_desc != NULL);
1193
1194 /*
1195 * Validate the access flags. Both Remote Write and Remote Atomic
1196 * require the Local Write flag to be set
1197 */
1198 if (((flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1199 (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1200 !(flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1201 return (IBT_MR_ACCESS_REQ_INVALID);
1202 }
1203
1204 /* Grab the Hermon softstate pointer and PD handle */
1205 state = (hermon_state_t *)hca;
1206 pdhdl = (hermon_pdhdl_t)pd;
1207
1208 /* Register the memory region */
1209 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1210 op.mro_bind_dmahdl = NULL;
1211 op.mro_bind_override_addr = 0;
1212 status = hermon_mr_register_buf(state, pdhdl, attrp, buf,
1213 &mrhdl, &op, HERMON_MPT_DMPT);
1214 if (status != DDI_SUCCESS) {
1215 return (status);
1216 }
1217 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1218
1219 /* Fill in the mr_desc structure */
1220 mr_desc->md_vaddr = mrhdl->mr_bindinfo.bi_addr;
1221 mr_desc->md_lkey = mrhdl->mr_lkey;
1222 /* Only set RKey if remote access was requested */
1223 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1224 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1225 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1226 mr_desc->md_rkey = mrhdl->mr_rkey;
1227 }
1228
1229 /*
1230 * If the region is mapped for streaming (i.e. noncoherent), then
1231 * mark it as requiring an explicit sync
1232 */
1233 mr_desc->md_sync_required = (mrhdl->mr_bindinfo.bi_flags &
1234 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1235
1236 /* Return the Hermon MR handle */
1237 *mr_p = (ibc_mr_hdl_t)mrhdl;
1238
1239 return (IBT_SUCCESS);
1240 }
1241
1242
1243 /*
1244 * hermon_ci_deregister_mr()
1245 * Deregister a Memory Region from an HCA translation table
1246 * Context: Can be called only from user or kernel context.
1247 */
1248 static ibt_status_t
1249 hermon_ci_deregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
1250 {
1251 hermon_state_t *state;
1252 hermon_mrhdl_t mrhdl;
1253 int status;
1254
1255 /* Grab the Hermon softstate pointer */
1256 state = (hermon_state_t *)hca;
1257 mrhdl = (hermon_mrhdl_t)mr;
1258
1259 /*
1260 * Deregister the memory region.
1261 */
1262 status = hermon_mr_deregister(state, &mrhdl, HERMON_MR_DEREG_ALL,
1263 HERMON_NOSLEEP);
1264 return (status);
1265 }
1266
1267
1268 /*
1269 * hermon_ci_query_mr()
1270 * Retrieve information about a specified Memory Region
1271 * Context: Can be called from interrupt or base context.
1272 */
1273 static ibt_status_t
1274 hermon_ci_query_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1275 ibt_mr_query_attr_t *mr_attr)
1276 {
1277 hermon_state_t *state;
1278 hermon_mrhdl_t mrhdl;
1279 int status;
1280
1281 ASSERT(mr_attr != NULL);
1282
1283 /* Grab the Hermon softstate pointer and MR handle */
1284 state = (hermon_state_t *)hca;
1285 mrhdl = (hermon_mrhdl_t)mr;
1286
1287 /* Query the memory region */
1288 status = hermon_mr_query(state, mrhdl, mr_attr);
1289 return (status);
1290 }
1291
1292
1293 /*
1294 * hermon_ci_register_shared_mr()
1295 * Create a shared memory region matching an existing Memory Region
1296 * Context: Can be called from interrupt or base context.
1297 */
1298 /* ARGSUSED */
1299 static ibt_status_t
1300 hermon_ci_register_shared_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
1301 ibc_pd_hdl_t pd, ibt_smr_attr_t *mr_attr, void *ibtl_reserved,
1302 ibc_mr_hdl_t *mr_p, ibt_mr_desc_t *mr_desc)
1303 {
1304 hermon_state_t *state;
1305 hermon_pdhdl_t pdhdl;
1306 hermon_mrhdl_t mrhdl, mrhdl_new;
1307 int status;
1308
1309 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1310
1311 ASSERT(mr_attr != NULL);
1312 ASSERT(mr_p != NULL);
1313 ASSERT(mr_desc != NULL);
1314
1315 /*
1316 * Validate the access flags. Both Remote Write and Remote Atomic
1317 * require the Local Write flag to be set
1318 */
1319 if (((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1320 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1321 !(mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1322 return (IBT_MR_ACCESS_REQ_INVALID);
1323 }
1324
1325 /* Grab the Hermon softstate pointer and handles */
1326 state = (hermon_state_t *)hca;
1327 pdhdl = (hermon_pdhdl_t)pd;
1328 mrhdl = (hermon_mrhdl_t)mr;
1329
1330 /* Register the shared memory region */
1331 status = hermon_mr_register_shared(state, mrhdl, pdhdl, mr_attr,
1332 &mrhdl_new);
1333 if (status != DDI_SUCCESS) {
1334 return (status);
1335 }
1336 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1337
1338 /* Fill in the mr_desc structure */
1339 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1340 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1341 /* Only set RKey if remote access was requested */
1342 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1343 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1344 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1345 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1346 }
1347
1348 /*
1349 * If the shared region is mapped for streaming (i.e. noncoherent),
1350 * then mark it as requiring an explicit sync
1351 */
1352 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1353 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1354
1355 /* Return the Hermon MR handle */
1356 *mr_p = (ibc_mr_hdl_t)mrhdl_new;
1357
1358 return (IBT_SUCCESS);
1359 }
1360
1361
1362 /*
1363 * hermon_ci_reregister_mr()
1364 * Modify the attributes of an existing Memory Region
1365 * Context: Can be called from interrupt or base context.
1366 */
1367 /* ARGSUSED */
1368 static ibt_status_t
1369 hermon_ci_reregister_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1370 ibt_mr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_new,
1371 ibt_mr_desc_t *mr_desc)
1372 {
1373 hermon_mr_options_t op;
1374 hermon_state_t *state;
1375 hermon_pdhdl_t pdhdl;
1376 hermon_mrhdl_t mrhdl, mrhdl_new;
1377 int status;
1378
1379 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1380
1381 ASSERT(mr_attr != NULL);
1382 ASSERT(mr_new != NULL);
1383 ASSERT(mr_desc != NULL);
1384
1385 /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1386 state = (hermon_state_t *)hca;
1387 mrhdl = (hermon_mrhdl_t)mr;
1388 pdhdl = (hermon_pdhdl_t)pd;
1389
1390 /* Reregister the memory region */
1391 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1392 status = hermon_mr_reregister(state, mrhdl, pdhdl, mr_attr,
1393 &mrhdl_new, &op);
1394 if (status != DDI_SUCCESS) {
1395 return (status);
1396 }
1397 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1398
1399 /* Fill in the mr_desc structure */
1400 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1401 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1402 /* Only set RKey if remote access was requested */
1403 if ((mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1404 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1405 (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1406 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1407 }
1408
1409 /*
1410 * If the region is mapped for streaming (i.e. noncoherent), then
1411 * mark it as requiring an explicit sync
1412 */
1413 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1414 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1415
1416 /* Return the Hermon MR handle */
1417 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1418
1419 return (IBT_SUCCESS);
1420 }
1421
1422
1423 /*
1424 * hermon_ci_reregister_buf()
1425 * Modify the attributes of an existing Memory Region
1426 * Context: Can be called from interrupt or base context.
1427 */
1428 /* ARGSUSED */
1429 static ibt_status_t
1430 hermon_ci_reregister_buf(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr, ibc_pd_hdl_t pd,
1431 ibt_smr_attr_t *attrp, struct buf *buf, void *ibtl_reserved,
1432 ibc_mr_hdl_t *mr_new, ibt_mr_desc_t *mr_desc)
1433 {
1434 hermon_mr_options_t op;
1435 hermon_state_t *state;
1436 hermon_pdhdl_t pdhdl;
1437 hermon_mrhdl_t mrhdl, mrhdl_new;
1438 int status;
1439 ibt_mr_flags_t flags = attrp->mr_flags;
1440
1441 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1442
1443 ASSERT(mr_new != NULL);
1444 ASSERT(mr_desc != NULL);
1445
1446 /* Grab the Hermon softstate pointer, mrhdl, and pdhdl */
1447 state = (hermon_state_t *)hca;
1448 mrhdl = (hermon_mrhdl_t)mr;
1449 pdhdl = (hermon_pdhdl_t)pd;
1450
1451 /* Reregister the memory region */
1452 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass;
1453 status = hermon_mr_reregister_buf(state, mrhdl, pdhdl, attrp, buf,
1454 &mrhdl_new, &op);
1455 if (status != DDI_SUCCESS) {
1456 return (status);
1457 }
1458 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl_new))
1459
1460 /* Fill in the mr_desc structure */
1461 mr_desc->md_vaddr = mrhdl_new->mr_bindinfo.bi_addr;
1462 mr_desc->md_lkey = mrhdl_new->mr_lkey;
1463 /* Only set RKey if remote access was requested */
1464 if ((flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1465 (flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1466 (flags & IBT_MR_ENABLE_REMOTE_READ)) {
1467 mr_desc->md_rkey = mrhdl_new->mr_rkey;
1468 }
1469
1470 /*
1471 * If the region is mapped for streaming (i.e. noncoherent), then
1472 * mark it as requiring an explicit sync
1473 */
1474 mr_desc->md_sync_required = (mrhdl_new->mr_bindinfo.bi_flags &
1475 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
1476
1477 /* Return the Hermon MR handle */
1478 *mr_new = (ibc_mr_hdl_t)mrhdl_new;
1479
1480 return (IBT_SUCCESS);
1481 }
1482
1483 /*
1484 * hermon_ci_sync_mr()
1485 * Synchronize access to a Memory Region
1486 * Context: Can be called from interrupt or base context.
1487 */
1488 static ibt_status_t
1489 hermon_ci_sync_mr(ibc_hca_hdl_t hca, ibt_mr_sync_t *mr_segs, size_t num_segs)
1490 {
1491 hermon_state_t *state;
1492 int status;
1493
1494 ASSERT(mr_segs != NULL);
1495
1496 /* Grab the Hermon softstate pointer */
1497 state = (hermon_state_t *)hca;
1498
1499 /* Sync the memory region */
1500 status = hermon_mr_sync(state, mr_segs, num_segs);
1501 return (status);
1502 }
1503
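/*
 * A hedged usage sketch for the sync entry point above: when a
 * registration reports md_sync_required, clients bracket DMA with
 * ibt_sync_mr(9F).  Field names follow ibt_mr_sync_t; the values are
 * illustrative.
 *
 *	ibt_mr_sync_t	ms;
 *
 *	ms.ms_handle = mr_hdl;
 *	ms.ms_vaddr  = mr_desc.md_vaddr;
 *	ms.ms_len    = len;
 *	ms.ms_flags  = IBT_SYNC_READ;	(before the CPU reads HCA data)
 *	(void) ibt_sync_mr(hca_hdl, &ms, 1);
 */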
1504
1505 /*
1506 * hermon_ci_alloc_mw()
1507 * Allocate a Memory Window
1508 * Context: Can be called from interrupt or base context.
1509 */
1510 static ibt_status_t
1511 hermon_ci_alloc_mw(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd, ibt_mw_flags_t flags,
1512 ibc_mw_hdl_t *mw_p, ibt_rkey_t *rkey_p)
1513 {
1514 hermon_state_t *state;
1515 hermon_pdhdl_t pdhdl;
1516 hermon_mwhdl_t mwhdl;
1517 int status;
1518
1519 ASSERT(mw_p != NULL);
1520 ASSERT(rkey_p != NULL);
1521
1522 /* Grab the Hermon softstate pointer and PD handle */
1523 state = (hermon_state_t *)hca;
1524 pdhdl = (hermon_pdhdl_t)pd;
1525
1526 /* Allocate the memory window */
1527 status = hermon_mw_alloc(state, pdhdl, flags, &mwhdl);
1528 if (status != DDI_SUCCESS) {
1529 return (status);
1530 }
1531 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mwhdl))
1532
1533 /* Return the MW handle and RKey */
1534 *mw_p = (ibc_mw_hdl_t)mwhdl;
1535 *rkey_p = mwhdl->mr_rkey;
1536
1537 return (IBT_SUCCESS);
1538 }
1539
1540
1541 /*
1542 * hermon_ci_free_mw()
1543 * Free a Memory Window
1544 * Context: Can be called from interrupt or base context.
1545 */
1546 static ibt_status_t
1547 hermon_ci_free_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw)
1548 {
1549 hermon_state_t *state;
1550 hermon_mwhdl_t mwhdl;
1551 int status;
1552
1553 /* Grab the Hermon softstate pointer and MW handle */
1554 state = (hermon_state_t *)hca;
1555 mwhdl = (hermon_mwhdl_t)mw;
1556
1557 /* Free the memory window */
1558 status = hermon_mw_free(state, &mwhdl, HERMON_NOSLEEP);
1559 return (status);
1560 }
1561
1562
1563 /*
1564 * hermon_ci_query_mw()
1565 * Return the attributes of the specified Memory Window
1566 * Context: Can be called from interrupt or base context.
1567 */
1568 /* ARGSUSED */
1569 static ibt_status_t
1570 hermon_ci_query_mw(ibc_hca_hdl_t hca, ibc_mw_hdl_t mw,
1571 ibt_mw_query_attr_t *mw_attr_p)
1572 {
1573 hermon_mwhdl_t mwhdl;
1574
1575 ASSERT(mw_attr_p != NULL);
1576
1577 /* Query the memory window pointer and fill in the return values */
1578 mwhdl = (hermon_mwhdl_t)mw;
1579 mutex_enter(&mwhdl->mr_lock);
1580 mw_attr_p->mw_pd = (ibc_pd_hdl_t)mwhdl->mr_pdhdl;
1581 mw_attr_p->mw_rkey = mwhdl->mr_rkey;
1582 mutex_exit(&mwhdl->mr_lock);
1583
1584 return (IBT_SUCCESS);
1585 }
1586
1587
1588 /*
1589 * hermon_ci_register_dma_mr()
1590 * Allocate a memory region that maps physical addresses.
1591 * Context: Can be called only from user or kernel context.
1592 */
1593 /* ARGSUSED */
1594 static ibt_status_t
1595 hermon_ci_register_dma_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
1596 ibt_dmr_attr_t *mr_attr, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
1597 ibt_mr_desc_t *mr_desc)
1598 {
1599 hermon_state_t *state;
1600 hermon_pdhdl_t pdhdl;
1601 hermon_mrhdl_t mrhdl;
1602 int status;
1603
1604 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_desc))
1605
1606 ASSERT(mr_attr != NULL);
1607 ASSERT(mr_p != NULL);
1608 ASSERT(mr_desc != NULL);
1609
1610 /*
1611 * Validate the access flags. Both Remote Write and Remote Atomic
1612 * require the Local Write flag to be set
1613 */
1614 if (((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1615 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
1616 !(mr_attr->dmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
1617 return (IBT_MR_ACCESS_REQ_INVALID);
1618 }
1619
1620 /* Grab the Hermon softstate pointer and PD handle */
1621 state = (hermon_state_t *)hca;
1622 pdhdl = (hermon_pdhdl_t)pd;
1623
1624 status = hermon_dma_mr_register(state, pdhdl, mr_attr, &mrhdl);
1625 if (status != DDI_SUCCESS) {
1626 return (status);
1627 }
1628 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
1629
1630 /* Fill in the mr_desc structure */
1631 mr_desc->md_vaddr = mr_attr->dmr_paddr;
1632 mr_desc->md_lkey = mrhdl->mr_lkey;
1633 /* Only set RKey if remote access was requested */
1634 if ((mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC) ||
1635 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
1636 (mr_attr->dmr_flags & IBT_MR_ENABLE_REMOTE_READ)) {
1637 mr_desc->md_rkey = mrhdl->mr_rkey;
1638 }
1639
1640 /*
1641 * Physical (DMA) memory regions registered here are not mapped for
1642 * streaming, so no explicit sync is ever required
1643 */
1644 mr_desc->md_sync_required = B_FALSE;
1645
1646 /* Return the Hermon MR handle */
1647 *mr_p = (ibc_mr_hdl_t)mrhdl;
1648
1649 return (IBT_SUCCESS);
1650 }
1651
1652
1653 /*
1654 * hermon_ci_attach_mcg()
1655 * Attach a Queue Pair to a Multicast Group
1656 * Context: Can be called only from user or kernel context.
1657 */
1658 static ibt_status_t
1659 hermon_ci_attach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1660 ib_lid_t lid)
1661 {
1662 hermon_state_t *state;
1663 hermon_qphdl_t qphdl;
1664 int status;
1665
1666 /* Grab the Hermon softstate pointer and QP handles */
1667 state = (hermon_state_t *)hca;
1668 qphdl = (hermon_qphdl_t)qp;
1669
1670 /* Attach the QP to the multicast group */
1671 status = hermon_mcg_attach(state, qphdl, gid, lid);
1672 return (status);
1673 }
1674
1675
1676 /*
1677 * hermon_ci_detach_mcg()
1678 * Detach a Queue Pair from a Multicast Group
1679 * Context: Can be called only from user or kernel context.
1680 */
1681 static ibt_status_t
1682 hermon_ci_detach_mcg(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ib_gid_t gid,
1683 ib_lid_t lid)
1684 {
1685 hermon_state_t *state;
1686 hermon_qphdl_t qphdl;
1687 int status;
1688
1689 /* Grab the Hermon softstate pointer and QP handle */
1690 state = (hermon_state_t *)hca;
1691 qphdl = (hermon_qphdl_t)qp;
1692
1693 /* Detach the QP from the multicast group */
1694 status = hermon_mcg_detach(state, qphdl, gid, lid);
1695 return (status);
1696 }
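
/*
 * Illustrative sketch (hypothetical flow; the IBTF normally reaches these
 * entry points through the ibc_operations_t vector): attach and detach are
 * symmetric, so each successful attach is eventually balanced by a detach
 * carrying the same GID/LID pair.
 *
 *	if (hermon_ci_attach_mcg(hca, qp, mcg_gid, mcg_lid) == IBT_SUCCESS) {
 *		// ... receive multicast traffic on this QP ...
 *		(void) hermon_ci_detach_mcg(hca, qp, mcg_gid, mcg_lid);
 *	}
 */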
1697
1698
1699 /*
1700 * hermon_ci_post_send()
1701 * Post send work requests to the send queue on the specified QP
1702 * Context: Can be called from interrupt or base context.
1703 */
1704 static ibt_status_t
1705 hermon_ci_post_send(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_send_wr_t *wr_p,
1706 uint_t num_wr, uint_t *num_posted_p)
1707 {
1708 hermon_state_t *state;
1709 hermon_qphdl_t qphdl;
1710 int status;
1711
1712 ASSERT(wr_p != NULL);
1713 ASSERT(num_wr != 0);
1714
1715 /* Grab the Hermon softstate pointer and QP handle */
1716 state = (hermon_state_t *)hca;
1717 qphdl = (hermon_qphdl_t)qp;
1718
1719 /* Post the send WQEs */
1720 status = hermon_post_send(state, qphdl, wr_p, num_wr, num_posted_p);
1721 return (status);
1722 }
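
/*
 * Illustrative sketch (hypothetical, simplified): posting one signaled
 * SEND through this entry point on a reliable-connected QP. "sgl" is a
 * caller-prepared ibt_wr_ds_t and "my_ctx" a hypothetical completion
 * cookie; num_posted reports how many WRs were queued on partial failure.
 *
 *	ibt_send_wr_t wr;
 *	ibt_status_t status;
 *	uint_t num_posted;
 *
 *	wr.wr_id = (ibt_wrid_t)(uintptr_t)my_ctx;
 *	wr.wr_flags = IBT_WR_SEND_SIGNAL;	// request a completion
 *	wr.wr_trans = IBT_RC_SRV;
 *	wr.wr_opcode = IBT_WRC_SEND;
 *	wr.wr_nds = 1;
 *	wr.wr_sgl = &sgl;
 *	status = hermon_ci_post_send(hca, qp, &wr, 1, &num_posted);
 */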
1723
1724
1725 /*
1726 * hermon_ci_post_recv()
1727 * Post receive work requests to the receive queue on the specified QP
1728 * Context: Can be called from interrupt or base context.
1729 */
1730 static ibt_status_t
1731 hermon_ci_post_recv(ibc_hca_hdl_t hca, ibc_qp_hdl_t qp, ibt_recv_wr_t *wr_p,
1732 uint_t num_wr, uint_t *num_posted_p)
1733 {
1734 hermon_state_t *state;
1735 hermon_qphdl_t qphdl;
1736 int status;
1737
1738 ASSERT(wr_p != NULL);
1739 ASSERT(num_wr != 0);
1740
1741 state = (hermon_state_t *)hca;
1742 qphdl = (hermon_qphdl_t)qp;
1743
1744 /* Post the receive WQEs */
1745 status = hermon_post_recv(state, qphdl, wr_p, num_wr, num_posted_p);
1746 return (status);
1747 }
1748
1749
1750 /*
1751 * hermon_ci_poll_cq()
1752 * Poll for a work request completion
1753 * Context: Can be called from interrupt or base context.
1754 */
1755 static ibt_status_t
1756 hermon_ci_poll_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq, ibt_wc_t *wc_p,
1757 uint_t num_wc, uint_t *num_polled)
1758 {
1759 hermon_state_t *state;
1760 hermon_cqhdl_t cqhdl;
1761 int status;
1762
1763 ASSERT(wc_p != NULL);
1764
1765 /* Check for valid num_wc field */
1766 if (num_wc == 0) {
1767 return (IBT_INVALID_PARAM);
1768 }
1769
1770 /* Grab the Hermon softstate pointer and CQ handle */
1771 state = (hermon_state_t *)hca;
1772 cqhdl = (hermon_cqhdl_t)cq;
1773
1774 /* Poll for work request completions */
1775 status = hermon_cq_poll(state, cqhdl, wc_p, num_wc, num_polled);
1776 return (status);
1777 }
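
/*
 * Illustrative sketch (hypothetical): draining a CQ through this entry
 * point. hermon_cq_poll() returns IBT_CQ_EMPTY once nothing is pending,
 * which ends the loop; handle_completion() is a hypothetical consumer.
 *
 *	ibt_wc_t wc[8];
 *	uint_t polled, n;
 *
 *	while (hermon_ci_poll_cq(hca, cq, wc, 8, &polled) == IBT_SUCCESS) {
 *		for (n = 0; n < polled; n++)
 *			handle_completion(&wc[n]);
 *	}
 */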
1778
1779
1780 /*
1781 * hermon_ci_notify_cq()
1782 * Enable notification events on the specified CQ
1783 * Context: Can be called from interrupt or base context.
1784 */
1785 static ibt_status_t
1786 hermon_ci_notify_cq(ibc_hca_hdl_t hca, ibc_cq_hdl_t cq_hdl,
1787 ibt_cq_notify_flags_t flags)
1788 {
1789 hermon_state_t *state;
1790 hermon_cqhdl_t cqhdl;
1791 int status;
1792
1793 /* Grab the Hermon softstate pointer and CQ handle */
1794 state = (hermon_state_t *)hca;
1795 cqhdl = (hermon_cqhdl_t)cq_hdl;
1796
1797 /* Enable the CQ notification */
1798 status = hermon_cq_notify(state, cqhdl, flags);
1799 return (status);
1800 }
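
/*
 * Illustrative note (standard CQ idiom, not driver code): a completion can
 * slip in between the final poll and the arming of the CQ, so clients
 * re-poll after enabling notification.
 *
 *	// drain the CQ until IBT_CQ_EMPTY ...
 *	(void) hermon_ci_notify_cq(hca, cq, IBT_NEXT_COMPLETION);
 *	// ... then poll once more; process and re-arm if CQEs appeared
 */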
1801
1802 /*
1803 * hermon_ci_ci_data_in()
1804 * Exchange CI-specific data.
1805 * Context: Can be called only from user or kernel context.
1806 */
1807 static ibt_status_t
1808 hermon_ci_ci_data_in(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
1809 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
1810 size_t data_sz)
1811 {
1812 hermon_state_t *state;
1813 int status;
1814
1815 /* Grab the Hermon softstate pointer */
1816 state = (hermon_state_t *)hca;
1817
1818 /* Get the Hermon userland mapping information */
1819 status = hermon_umap_ci_data_in(state, flags, object,
1820 ibc_object_handle, data_p, data_sz);
1821 return (status);
1822 }
1823
1824 /*
1825 * hermon_ci_ci_data_out()
1826 * Exchange CI-specific data.
1827 * Context: Can be called only from user or kernel context.
1828 */
1829 static ibt_status_t
1830 hermon_ci_ci_data_out(ibc_hca_hdl_t hca, ibt_ci_data_flags_t flags,
1831 ibt_object_type_t object, void *ibc_object_handle, void *data_p,
1832 size_t data_sz)
1833 {
1834 hermon_state_t *state;
1835 int status;
1836
1837 /* Grab the Hermon softstate pointer */
1838 state = (hermon_state_t *)hca;
1839
1840 /* Get the Hermon userland mapping information */
1841 status = hermon_umap_ci_data_out(state, flags, object,
1842 ibc_object_handle, data_p, data_sz);
1843 return (status);
1844 }
1845
1846
1847 /*
1848 * hermon_ci_alloc_srq()
1849 * Allocate a Shared Receive Queue (SRQ)
1850 * Context: Can be called only from user or kernel context
1851 */
1852 static ibt_status_t
1853 hermon_ci_alloc_srq(ibc_hca_hdl_t hca, ibt_srq_flags_t flags,
1854 ibt_srq_hdl_t ibt_srq, ibc_pd_hdl_t pd, ibt_srq_sizes_t *sizes,
1855 ibc_srq_hdl_t *ibc_srq_p, ibt_srq_sizes_t *ret_sizes_p)
1856 {
1857 hermon_state_t *state;
1858 hermon_pdhdl_t pdhdl;
1859 hermon_srqhdl_t srqhdl;
1860 hermon_srq_info_t srqinfo;
1861 int status;
1862
1863 state = (hermon_state_t *)hca;
1864 pdhdl = (hermon_pdhdl_t)pd;
1865
1866 srqinfo.srqi_ibt_srqhdl = ibt_srq;
1867 srqinfo.srqi_pd = pdhdl;
1868 srqinfo.srqi_sizes = sizes;
1869 srqinfo.srqi_real_sizes = ret_sizes_p;
1870 srqinfo.srqi_srqhdl = &srqhdl;
1871 srqinfo.srqi_flags = flags;
1872
1873 status = hermon_srq_alloc(state, &srqinfo, HERMON_NOSLEEP);
1874 if (status != DDI_SUCCESS) {
1875 return (status);
1876 }
1877
1878 *ibc_srq_p = (ibc_srq_hdl_t)srqhdl;
1879
1880 return (IBT_SUCCESS);
1881 }
1882
1883 /*
1884 * hermon_ci_free_srq()
1885 * Free a Shared Receive Queue (SRQ)
1886 * Context: Can be called only from user or kernel context
1887 */
1888 static ibt_status_t
1889 hermon_ci_free_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq)
1890 {
1891 hermon_state_t *state;
1892 hermon_srqhdl_t srqhdl;
1893 int status;
1894
1895 state = (hermon_state_t *)hca;
1896
1897 /* Check for valid SRQ handle pointer */
1898 if (srq == NULL) {
1899 return (IBT_SRQ_HDL_INVALID);
1900 }
1901
1902 srqhdl = (hermon_srqhdl_t)srq;
1903
1904 /* Free the SRQ */
1905 status = hermon_srq_free(state, &srqhdl, HERMON_NOSLEEP);
1906 return (status);
1907 }
1908
1909 /*
1910 * hermon_ci_query_srq()
1911 * Query properties of a Shared Receive Queue (SRQ)
1912 * Context: Can be called from interrupt or base context.
1913 */
1914 /* ARGSUSED */
1915 static ibt_status_t
1916 hermon_ci_query_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq, ibc_pd_hdl_t *pd_p,
1917 ibt_srq_sizes_t *sizes_p, uint_t *limit_p)
1918 {
1919 hermon_srqhdl_t srqhdl;
1920
1921 srqhdl = (hermon_srqhdl_t)srq;
1922
1923 mutex_enter(&srqhdl->srq_lock);
1924 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1925 mutex_exit(&srqhdl->srq_lock);
1926 return (IBT_SRQ_ERROR_STATE);
1927 }
1928
1929 *pd_p = (ibc_pd_hdl_t)srqhdl->srq_pdhdl;
1930 sizes_p->srq_wr_sz = srqhdl->srq_real_sizes.srq_wr_sz - 1;
1931 sizes_p->srq_sgl_sz = srqhdl->srq_real_sizes.srq_sgl_sz;
1932 mutex_exit(&srqhdl->srq_lock);
1933 *limit_p = 0;
1934
1935 return (IBT_SUCCESS);
1936 }
1937
1938 /*
1939 * hermon_ci_modify_srq()
1940 * Modify properties of a Shared Receive Queue (SRQ)
1941 * Context: Can be called from interrupt or base context.
1942 */
1943 /* ARGSUSED */
1944 static ibt_status_t
1945 hermon_ci_modify_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
1946 ibt_srq_modify_flags_t flags, uint_t size, uint_t limit, uint_t *ret_size_p)
1947 {
1948 hermon_state_t *state;
1949 hermon_srqhdl_t srqhdl;
1950 uint_t resize_supported, cur_srq_size;
1951 int status;
1952
1953 state = (hermon_state_t *)hca;
1954 srqhdl = (hermon_srqhdl_t)srq;
1955
1956 /*
1957 * Check Error State of SRQ.
1958 * Also, while we are holding the lock we save away the current SRQ
1959 * size for later use.
1960 */
1961 mutex_enter(&srqhdl->srq_lock);
1962 cur_srq_size = srqhdl->srq_wq_bufsz;
1963 if (srqhdl->srq_state == HERMON_SRQ_STATE_ERROR) {
1964 mutex_exit(&srqhdl->srq_lock);
1965 return (IBT_SRQ_ERROR_STATE);
1966 }
1967 mutex_exit(&srqhdl->srq_lock);
1968
1969 /*
1970 * Setting the limit watermark is not currently supported. This is a
1971 * hermon hardware (firmware) limitation. We return NOT_SUPPORTED here,
1972 * and have the limit code commented out for now.
1973 *
1974 * XXX If we enable the limit watermark support, we need to do checks
1975 * and set the 'srq->srq_wr_limit' here, instead of returning not
1976 * supported. The 'hermon_srq_modify' operation below is for resizing
1977 * the SRQ only, the limit work should be done here. If this is
1978 * changed to use the 'limit' field, the 'ARGSUSED' comment for this
1979 * function should also be removed at that time.
1980 */
1981 if (flags & IBT_SRQ_SET_LIMIT) {
1982 return (IBT_NOT_SUPPORTED);
1983 }
1984
1985 /*
1986 * Check the SET_SIZE flag. If not set, we simply return success here.
1987 * However if it is set, we check if resize is supported and only then
1988 * do we continue on with our resize processing.
1989 */
1990 if (!(flags & IBT_SRQ_SET_SIZE)) {
1991 return (IBT_SUCCESS);
1992 }
1993
1994 resize_supported = state->hs_ibtfinfo.hca_attr->hca_flags &
1995 IBT_HCA_RESIZE_SRQ;
1996
1997 	if (!resize_supported) {	/* IBT_SRQ_SET_SIZE is known set here */
1998 return (IBT_NOT_SUPPORTED);
1999 }
2000
2001 /*
2002 	 * We do not support resizing an SRQ to be smaller than its current
2003 * size. If a smaller (or equal) size is requested, then we simply
2004 * return success, and do nothing.
2005 */
2006 if (size <= cur_srq_size) {
2007 *ret_size_p = cur_srq_size;
2008 return (IBT_SUCCESS);
2009 }
2010
2011 status = hermon_srq_modify(state, srqhdl, size, ret_size_p,
2012 HERMON_NOSLEEP);
2013 if (status != DDI_SUCCESS) {
2014 /* Set return value to current SRQ size */
2015 *ret_size_p = cur_srq_size;
2016 return (status);
2017 }
2018
2019 return (IBT_SUCCESS);
2020 }
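
/*
 * Illustrative sketch (hypothetical): growing an SRQ under the resize-only
 * semantics above. Requests at or below the current size succeed without
 * touching the hardware; ret_size always reports the size actually in
 * effect. The limit argument must be 0 (setting a limit is unsupported).
 *
 *	uint_t ret_size;
 *
 *	status = hermon_ci_modify_srq(hca, srq, IBT_SRQ_SET_SIZE,
 *	    new_size, 0, &ret_size);
 */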
2021
2022 /*
2023 * hermon_ci_post_srq()
2024 * Post a Work Request to the specified Shared Receive Queue (SRQ)
2025 * Context: Can be called from interrupt or base context.
2026 */
2027 static ibt_status_t
2028 hermon_ci_post_srq(ibc_hca_hdl_t hca, ibc_srq_hdl_t srq,
2029 ibt_recv_wr_t *wr, uint_t num_wr, uint_t *num_posted_p)
2030 {
2031 hermon_state_t *state;
2032 hermon_srqhdl_t srqhdl;
2033 int status;
2034
2035 state = (hermon_state_t *)hca;
2036 srqhdl = (hermon_srqhdl_t)srq;
2037
2038 status = hermon_post_srq(state, srqhdl, wr, num_wr, num_posted_p);
2039 return (status);
2040 }
2041
2042 /* Address translation */
2043
2044 struct ibc_ma_s {
2045 	int			h_ma_addr_list_len;	/* # of list entries */
2046 	void			*h_ma_addr_list;	/* address list */
2047 	ddi_dma_handle_t	h_ma_dmahdl;	/* binding of client memory */
2048 	ddi_dma_handle_t	h_ma_list_hdl;	/* binding of the list itself */
2049 	ddi_acc_handle_t	h_ma_list_acc_hdl; /* access hdl for the list */
2050 	size_t			h_ma_real_len;	/* allocated list size */
2051 	caddr_t			h_ma_kaddr;	/* kernel va of the list */
2052 	ibt_phys_addr_t		h_ma_list_cookie; /* DMA address of the list */
2053 };
2054
2055 static ibt_status_t
2056 hermon_map_mem_area_fmr(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2057 uint_t list_len, ibt_pmr_attr_t *pmr, ibc_ma_hdl_t *ma_hdl_p)
2058 {
2059 int status;
2060 ibt_status_t ibt_status;
2061 ibc_ma_hdl_t ma_hdl;
2062 ib_memlen_t len;
2063 ddi_dma_attr_t dma_attr;
2064 uint_t cookie_cnt;
2065 ddi_dma_cookie_t dmacookie;
2066 hermon_state_t *state;
2067 uint64_t *kaddr;
2068 uint64_t addr, endaddr, pagesize;
2069 int i, kmflag;
2070 int (*callback)(caddr_t);
2071
2072 if ((va_attrs->va_flags & IBT_VA_BUF) == 0) {
2073 return (IBT_NOT_SUPPORTED); /* XXX - not yet implemented */
2074 }
2075
2076 state = (hermon_state_t *)hca;
2077 hermon_dma_attr_init(state, &dma_attr);
2078 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2079 kmflag = KM_NOSLEEP;
2080 callback = DDI_DMA_DONTWAIT;
2081 } else {
2082 kmflag = KM_SLEEP;
2083 callback = DDI_DMA_SLEEP;
2084 }
2085
2086 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2087 if (ma_hdl == NULL) {
2088 return (IBT_INSUFF_RESOURCE);
2089 }
2090 #ifdef __sparc
2091 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2092 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2093
2094 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2095 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2096 #endif
2097
2098 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2099 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2100 callback, NULL, &ma_hdl->h_ma_dmahdl);
2101 if (status != DDI_SUCCESS) {
2102 kmem_free(ma_hdl, sizeof (*ma_hdl));
2103 return (IBT_INSUFF_RESOURCE);
2104 }
2105 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2106 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2107 callback, NULL, &dmacookie, &cookie_cnt);
2108 if (status != DDI_DMA_MAPPED) {
2109 		ibt_status = ibc_get_ci_failure(0);
2110 goto marea_fail3;
2111 }
2112
2113 ma_hdl->h_ma_real_len = list_len * sizeof (ibt_phys_addr_t);
2114 ma_hdl->h_ma_kaddr = kmem_zalloc(ma_hdl->h_ma_real_len, kmflag);
2115 if (ma_hdl->h_ma_kaddr == NULL) {
2116 ibt_status = IBT_INSUFF_RESOURCE;
2117 goto marea_fail4;
2118 }
2119
2120 i = 0;
2121 len = 0;
2122 pagesize = PAGESIZE;
2123 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2124 while (cookie_cnt-- > 0) {
2125 addr = dmacookie.dmac_laddress;
2126 len += dmacookie.dmac_size;
2127 endaddr = addr + (dmacookie.dmac_size - 1);
2128 addr = addr & ~(pagesize - 1);
2129 while (addr <= endaddr) {
2130 if (i >= list_len) {
2131 				ibt_status = IBT_PBL_TOO_SMALL;
2132 goto marea_fail5;
2133 }
2134 kaddr[i] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2135 i++;
2136 addr += pagesize;
2137 if (addr == 0) {
2138 static int do_once = 1;
2139 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2140 do_once))
2141 if (do_once) {
2142 do_once = 0;
2143 cmn_err(CE_NOTE, "probable error in "
2144 "dma_cookie address: map_mem_area");
2145 }
2146 break;
2147 }
2148 }
2149 if (cookie_cnt != 0)
2150 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2151 }
2152
2153 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2154 pmr->pmr_addr_list = (ibt_phys_addr_t *)(void *)ma_hdl->h_ma_kaddr;
2155 pmr->pmr_iova = va_attrs->va_vaddr;
2156 pmr->pmr_len = len;
2157 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2158 	pmr->pmr_buf_sz = PAGESHIFT;	/* PRM says "Page Size", but... */
2159 pmr->pmr_num_buf = i;
2160 pmr->pmr_ma = ma_hdl;
2161
2162 *ma_hdl_p = ma_hdl;
2163 return (IBT_SUCCESS);
2164
2165 marea_fail5:
2166 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2167 marea_fail4:
2168 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2169 marea_fail3:
2170 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2171 kmem_free(ma_hdl, sizeof (*ma_hdl));
2172 *ma_hdl_p = NULL;
2173 return (ibt_status);
2174 }
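
/*
 * Worked example of the cookie-to-page expansion above (illustrative,
 * assuming PAGESIZE == 4096): a cookie with dmac_laddress 0x10001200 and
 * dmac_size 0x2000 ends at 0x100031ff, so it touches pages 0x10001000,
 * 0x10002000 and 0x10003000 and contributes three PBL entries, each OR'ed
 * with HERMON_MTT_ENTRY_PRESENT and byte-swapped to big-endian by htonll().
 */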
2175
2176 /*
2177 * hermon_ci_map_mem_area()
2178 * Context: Can be called from user or base context.
2179 *
2180 * Creates the memory mapping suitable for a subsequent posting of an
2181 * FRWR work request. All the info about the memory area for the
2182 * FRWR work request (wr member of "union ibt_reg_req_u") is filled
2183 * such that the client only needs to point wr.rc.rcwr.reg_pmr to it,
2184 * and then fill in the additional information only it knows.
2185 *
2186 * Alternatively, creates the memory mapping for FMR.
2187 */
2188 /* ARGSUSED */
2189 static ibt_status_t
2190 hermon_ci_map_mem_area(ibc_hca_hdl_t hca, ibt_va_attr_t *va_attrs,
2191 void *ibtl_reserved, uint_t list_len, ibt_reg_req_t *reg_req,
2192 ibc_ma_hdl_t *ma_hdl_p)
2193 {
2194 ibt_status_t ibt_status;
2195 int status;
2196 ibc_ma_hdl_t ma_hdl;
2197 ibt_wr_reg_pmr_t *pmr;
2198 ib_memlen_t len;
2199 ddi_dma_attr_t dma_attr;
2200 ddi_dma_handle_t khdl;
2201 uint_t cookie_cnt;
2202 ddi_dma_cookie_t dmacookie, kcookie;
2203 hermon_state_t *state;
2204 uint64_t *kaddr;
2205 uint64_t addr, endaddr, pagesize, kcookie_paddr;
2206 int i, j, kmflag;
2207 int (*callback)(caddr_t);
2208
2209 if (va_attrs->va_flags & (IBT_VA_FMR | IBT_VA_REG_FN)) {
2210 /* delegate FMR and Physical Register to other function */
2211 return (hermon_map_mem_area_fmr(hca, va_attrs, list_len,
2212 		    &reg_req->fn_arg, ma_hdl_p));
2213 }
2214
2215 /* FRWR */
2216
2217 state = (hermon_state_t *)hca;
2218 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2219 return (IBT_NOT_SUPPORTED);
2220 hermon_dma_attr_init(state, &dma_attr);
2221 #ifdef __sparc
2222 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2223 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2224
2225 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2226 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2227 #endif
2228 if (va_attrs->va_flags & IBT_VA_NOSLEEP) {
2229 kmflag = KM_NOSLEEP;
2230 callback = DDI_DMA_DONTWAIT;
2231 } else {
2232 kmflag = KM_SLEEP;
2233 callback = DDI_DMA_SLEEP;
2234 }
2235
2236 ma_hdl = kmem_zalloc(sizeof (*ma_hdl), kmflag);
2237 if (ma_hdl == NULL) {
2238 return (IBT_INSUFF_RESOURCE);
2239 }
2240 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ma_hdl))
2241
2242 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2243 callback, NULL, &ma_hdl->h_ma_dmahdl);
2244 if (status != DDI_SUCCESS) {
2245 ibt_status = IBT_INSUFF_RESOURCE;
2246 goto marea_fail0;
2247 }
2248 dma_attr.dma_attr_align = 64; /* as per PRM */
2249 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2250 callback, NULL, &ma_hdl->h_ma_list_hdl);
2251 if (status != DDI_SUCCESS) {
2252 ibt_status = IBT_INSUFF_RESOURCE;
2253 goto marea_fail1;
2254 }
2255 /*
2256 * Entries in the list in the last slot on each page cannot be used,
2257 * so 1 extra ibt_phys_addr_t is allocated per page. We add 1 more
2258 	 * to deal with the possibility that an allocation of less than one
2259 	 * page crosses a page boundary.
2260 */
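	/*
	 * Worked sizing example (illustrative, assuming
	 * sizeof (ibt_phys_addr_t) == 8 and HERMON_PAGESIZE == 4096): each
	 * page then holds 512 entries, so list_len == 1024 allocates
	 * 1024 + 1 + 1024/512 == 1027 entries.
	 */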
2261 status = ddi_dma_mem_alloc(ma_hdl->h_ma_list_hdl, (list_len + 1 +
2262 list_len / (HERMON_PAGESIZE / sizeof (ibt_phys_addr_t))) *
2263 sizeof (ibt_phys_addr_t),
2264 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, callback, NULL,
2265 &ma_hdl->h_ma_kaddr, &ma_hdl->h_ma_real_len,
2266 &ma_hdl->h_ma_list_acc_hdl);
2267 if (status != DDI_SUCCESS) {
2268 ibt_status = IBT_INSUFF_RESOURCE;
2269 goto marea_fail2;
2270 }
2271 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_list_hdl, NULL,
2272 ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len, DDI_DMA_RDWR |
2273 DDI_DMA_CONSISTENT, callback, NULL,
2274 &kcookie, &cookie_cnt);
2275 if (status != DDI_SUCCESS) {
2276 ibt_status = IBT_INSUFF_RESOURCE;
2277 goto marea_fail3;
2278 }
2279 if ((kcookie.dmac_laddress & 0x3f) != 0) {
2280 cmn_err(CE_NOTE, "64-byte alignment assumption wrong");
2281 ibt_status = ibc_get_ci_failure(0);
2282 goto marea_fail4;
2283 }
2284 ma_hdl->h_ma_list_cookie.p_laddr = kcookie.dmac_laddress;
2285
2286 if (va_attrs->va_flags & IBT_VA_BUF) {
2287 status = ddi_dma_buf_bind_handle(ma_hdl->h_ma_dmahdl,
2288 va_attrs->va_buf, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2289 callback, NULL, &dmacookie, &cookie_cnt);
2290 } else {
2291 status = ddi_dma_addr_bind_handle(ma_hdl->h_ma_dmahdl,
2292 va_attrs->va_as, (caddr_t)(uintptr_t)va_attrs->va_vaddr,
2293 va_attrs->va_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2294 callback, NULL, &dmacookie, &cookie_cnt);
2295 }
2296 if (status != DDI_DMA_MAPPED) {
2297 ibt_status = ibc_get_ci_failure(0);
2298 goto marea_fail4;
2299 }
2300 i = 0; /* count the number of pbl entries */
2301 j = 0; /* count the number of links to next HERMON_PAGE */
2302 len = 0;
2303 pagesize = PAGESIZE;
2304 kaddr = (uint64_t *)(void *)ma_hdl->h_ma_kaddr;
2305 kcookie.dmac_size += kcookie.dmac_laddress & HERMON_PAGEOFFSET;
2306 kcookie_paddr = kcookie.dmac_laddress & HERMON_PAGEMASK;
2307 khdl = ma_hdl->h_ma_list_hdl;
2308 while (cookie_cnt-- > 0) {
2309 addr = dmacookie.dmac_laddress;
2310 len += dmacookie.dmac_size;
2311 endaddr = addr + (dmacookie.dmac_size - 1);
2312 addr = addr & ~(pagesize - 1);
2313 while (addr <= endaddr) {
2314 if (i >= list_len) {
2315 ibt_status = IBT_PBL_TOO_SMALL;
2316 goto marea_fail5;
2317 }
2318 /* Deal with last entry on page. */
2319 if (!((uintptr_t)&kaddr[i+j+1] & HERMON_PAGEOFFSET)) {
2320 if (kcookie.dmac_size > HERMON_PAGESIZE) {
2321 kcookie_paddr += HERMON_PAGESIZE;
2322 kcookie.dmac_size -= HERMON_PAGESIZE;
2323 } else {
2324 ddi_dma_nextcookie(khdl, &kcookie);
2325 kcookie_paddr = kcookie.dmac_laddress;
2326 }
2327 kaddr[i+j] = htonll(kcookie_paddr);
2328 j++;
2329 }
2330 kaddr[i+j] = htonll(addr | HERMON_MTT_ENTRY_PRESENT);
2331 i++;
2332 addr += pagesize;
2333 if (addr == 0) {
2334 static int do_once = 1;
2335 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2336 do_once))
2337 if (do_once) {
2338 do_once = 0;
2339 cmn_err(CE_NOTE, "probable error in "
2340 "dma_cookie address: map_mem_area");
2341 }
2342 break;
2343 }
2344 }
2345 if (cookie_cnt != 0)
2346 ddi_dma_nextcookie(ma_hdl->h_ma_dmahdl, &dmacookie);
2347 }
2348
2349 	pmr = &reg_req->wr;
2350 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pmr))
2351 pmr->pmr_len = len;
2352 pmr->pmr_offset = va_attrs->va_vaddr & PAGEOFFSET;
2353 pmr->pmr_buf_sz = PAGESHIFT; /* PRM says "Page Size", but... */
2354 pmr->pmr_num_buf = i;
2355 pmr->pmr_addr_list = &ma_hdl->h_ma_list_cookie;
2356
2357 *ma_hdl_p = ma_hdl;
2358 return (IBT_SUCCESS);
2359
2360 marea_fail5:
2361 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2362 if (status != DDI_SUCCESS)
2363 HERMON_WARNING(state, "failed to unbind DMA mapping");
2364 marea_fail4:
2365 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2366 if (status != DDI_SUCCESS)
2367 HERMON_WARNING(state, "failed to unbind DMA mapping");
2368 marea_fail3:
2369 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2370 marea_fail2:
2371 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2372 marea_fail1:
2373 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2374 marea_fail0:
2375 kmem_free(ma_hdl, sizeof (*ma_hdl));
2376 *ma_hdl_p = NULL;
2377 return (ibt_status);
2378 }
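
/*
 * Illustrative sketch (hypothetical client flow) of the FRWR usage
 * described in the block comment above hermon_ci_map_mem_area(): the
 * client points its fast-register WR at the filled-in ibt_wr_reg_pmr_t
 * and supplies only what it alone knows (keys and access flags).
 *
 *	ibt_reg_req_t reg_req;
 *	ibc_ma_hdl_t ma_hdl;
 *
 *	status = hermon_ci_map_mem_area(hca, &va_attrs, NULL, list_len,
 *	    &reg_req, &ma_hdl);
 *	// per the comment above: wr.rc.rcwr.reg_pmr = &reg_req.wr;
 *	// ... fill in the remaining ibt_wr_reg_pmr_t fields and post ...
 */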
2379
2380 /*
2381 * hermon_ci_unmap_mem_area()
2382 * Unmap the memory area
2383 * Context: Can be called from interrupt or base context.
2384 */
2385 /* ARGSUSED */
2386 static ibt_status_t
2387 hermon_ci_unmap_mem_area(ibc_hca_hdl_t hca, ibc_ma_hdl_t ma_hdl)
2388 {
2389 int status;
2390 hermon_state_t *state;
2391
2392 if (ma_hdl == NULL) {
2393 return (IBT_MA_HDL_INVALID);
2394 }
2395 state = (hermon_state_t *)hca;
2396 if (ma_hdl->h_ma_list_hdl != NULL) {
2397 status = ddi_dma_unbind_handle(ma_hdl->h_ma_list_hdl);
2398 if (status != DDI_SUCCESS)
2399 HERMON_WARNING(state, "failed to unbind DMA mapping");
2400 ddi_dma_mem_free(&ma_hdl->h_ma_list_acc_hdl);
2401 ddi_dma_free_handle(&ma_hdl->h_ma_list_hdl);
2402 } else {
2403 kmem_free(ma_hdl->h_ma_kaddr, ma_hdl->h_ma_real_len);
2404 }
2405 status = ddi_dma_unbind_handle(ma_hdl->h_ma_dmahdl);
2406 if (status != DDI_SUCCESS)
2407 HERMON_WARNING(state, "failed to unbind DMA mapping");
2408 ddi_dma_free_handle(&ma_hdl->h_ma_dmahdl);
2409 kmem_free(ma_hdl, sizeof (*ma_hdl));
2410 return (IBT_SUCCESS);
2411 }
2412
2413 struct ibc_mi_s {
2414 	int			imh_len;	/* # of handles that follow */
2415 	ddi_dma_handle_t	imh_dmahandle[1]; /* over-allocated as needed */
2416 };
2417 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
2418 ibc_mi_s::imh_len
2419 ibc_mi_s::imh_dmahandle))
2420
2421
2422 /*
2423 * hermon_ci_map_mem_iov()
2424 * Map the memory
2425 * Context: Can be called from interrupt or base context.
2426 */
2427 /* ARGSUSED */
2428 static ibt_status_t
2429 hermon_ci_map_mem_iov(ibc_hca_hdl_t hca, ibt_iov_attr_t *iov_attr,
2430 ibt_all_wr_t *wr, ibc_mi_hdl_t *mi_hdl_p)
2431 {
2432 int status;
2433 int i, j, nds, max_nds;
2434 uint_t len;
2435 ibt_status_t ibt_status;
2436 ddi_dma_handle_t dmahdl;
2437 ddi_dma_cookie_t dmacookie;
2438 ddi_dma_attr_t dma_attr;
2439 uint_t cookie_cnt;
2440 ibc_mi_hdl_t mi_hdl;
2441 ibt_lkey_t rsvd_lkey;
2442 ibt_wr_ds_t *sgl;
2443 hermon_state_t *state;
2444 int kmflag;
2445 int (*callback)(caddr_t);
2446
2447 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wr))
2448
2449 state = (hermon_state_t *)hca;
2450 hermon_dma_attr_init(state, &dma_attr);
2451 #ifdef __sparc
2452 if (state->hs_cfg_profile->cp_iommu_bypass == HERMON_BINDMEM_BYPASS)
2453 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
2454
2455 if (hermon_kernel_data_ro == HERMON_RO_ENABLED)
2456 dma_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
2457 #endif
2458
2459 nds = 0;
2460 max_nds = iov_attr->iov_wr_nds;
2461 if (iov_attr->iov_lso_hdr_sz)
2462 max_nds -= (iov_attr->iov_lso_hdr_sz + sizeof (uint32_t) +
2463 0xf) >> 4; /* 0xf is for rounding up to a multiple of 16 */
2464 rsvd_lkey = (iov_attr->iov_flags & IBT_IOV_ALT_LKEY) ?
2465 iov_attr->iov_alt_lkey : state->hs_devlim.rsv_lkey;
2466 if ((iov_attr->iov_flags & IBT_IOV_NOSLEEP) == 0) {
2467 kmflag = KM_SLEEP;
2468 callback = DDI_DMA_SLEEP;
2469 } else {
2470 kmflag = KM_NOSLEEP;
2471 callback = DDI_DMA_DONTWAIT;
2472 }
2473
2474 if (iov_attr->iov_flags & IBT_IOV_BUF) {
2475 mi_hdl = kmem_alloc(sizeof (*mi_hdl), kmflag);
2476 if (mi_hdl == NULL)
2477 return (IBT_INSUFF_RESOURCE);
2478 sgl = wr->send.wr_sgl;
2479 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2480
2481 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2482 callback, NULL, &dmahdl);
2483 if (status != DDI_SUCCESS) {
2484 kmem_free(mi_hdl, sizeof (*mi_hdl));
2485 return (IBT_INSUFF_RESOURCE);
2486 }
2487 status = ddi_dma_buf_bind_handle(dmahdl, iov_attr->iov_buf,
2488 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2489 &dmacookie, &cookie_cnt);
2490 if (status != DDI_DMA_MAPPED) {
2491 ddi_dma_free_handle(&dmahdl);
2492 kmem_free(mi_hdl, sizeof (*mi_hdl));
2493 return (ibc_get_ci_failure(0));
2494 }
2495 while (cookie_cnt-- > 0) {
2496 			if (nds >= max_nds) {	/* sgl[] holds max_nds entries */
2497 status = ddi_dma_unbind_handle(dmahdl);
2498 if (status != DDI_SUCCESS)
2499 HERMON_WARNING(state, "failed to "
2500 "unbind DMA mapping");
2501 ddi_dma_free_handle(&dmahdl);
2502 return (IBT_SGL_TOO_SMALL);
2503 }
2504 sgl[nds].ds_va = dmacookie.dmac_laddress;
2505 sgl[nds].ds_key = rsvd_lkey;
2506 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2507 nds++;
2508 if (cookie_cnt != 0)
2509 ddi_dma_nextcookie(dmahdl, &dmacookie);
2510 }
2511 wr->send.wr_nds = nds;
2512 mi_hdl->imh_len = 1;
2513 mi_hdl->imh_dmahandle[0] = dmahdl;
2514 *mi_hdl_p = mi_hdl;
2515 return (IBT_SUCCESS);
2516 }
2517
2518 if (iov_attr->iov_flags & IBT_IOV_RECV)
2519 sgl = wr->recv.wr_sgl;
2520 else
2521 sgl = wr->send.wr_sgl;
2522 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sgl))
2523
2524 len = iov_attr->iov_list_len;
2525 for (i = 0, j = 0; j < len; j++) {
2526 if (iov_attr->iov[j].iov_len == 0)
2527 continue;
2528 i++;
2529 }
2530 mi_hdl = kmem_alloc(sizeof (*mi_hdl) +
2531 (i - 1) * sizeof (ddi_dma_handle_t), kmflag);
2532 if (mi_hdl == NULL)
2533 return (IBT_INSUFF_RESOURCE);
2534 mi_hdl->imh_len = i;
2535 for (i = 0, j = 0; j < len; j++) {
2536 if (iov_attr->iov[j].iov_len == 0)
2537 continue;
2538 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr,
2539 callback, NULL, &dmahdl);
2540 if (status != DDI_SUCCESS) {
2541 ibt_status = IBT_INSUFF_RESOURCE;
2542 goto fail2;
2543 }
2544 status = ddi_dma_addr_bind_handle(dmahdl, iov_attr->iov_as,
2545 iov_attr->iov[j].iov_addr, iov_attr->iov[j].iov_len,
2546 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
2547 &dmacookie, &cookie_cnt);
2548 if (status != DDI_DMA_MAPPED) {
2549 ibt_status = ibc_get_ci_failure(0);
2550 goto fail1;
2551 }
2552 		if (nds + cookie_cnt > max_nds) {
2553 			ibt_status = IBT_SGL_TOO_SMALL;
			mi_hdl->imh_dmahandle[i++] = dmahdl; /* fail2 unbinds it */
2554 			goto fail2;
2555 		}
2556 while (cookie_cnt-- > 0) {
2557 sgl[nds].ds_va = dmacookie.dmac_laddress;
2558 sgl[nds].ds_key = rsvd_lkey;
2559 sgl[nds].ds_len = (ib_msglen_t)dmacookie.dmac_size;
2560 nds++;
2561 if (cookie_cnt != 0)
2562 ddi_dma_nextcookie(dmahdl, &dmacookie);
2563 }
2564 mi_hdl->imh_dmahandle[i] = dmahdl;
2565 i++;
2566 }
2567
2568 if (iov_attr->iov_flags & IBT_IOV_RECV)
2569 wr->recv.wr_nds = nds;
2570 else
2571 wr->send.wr_nds = nds;
2572 *mi_hdl_p = mi_hdl;
2573 return (IBT_SUCCESS);
2574
2575 fail1:
2576 ddi_dma_free_handle(&dmahdl);
2577 fail2:
2578 while (--i >= 0) {
2579 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2580 if (status != DDI_SUCCESS)
2581 HERMON_WARNING(state, "failed to unbind DMA mapping");
2582 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2583 }
2584 	kmem_free(mi_hdl, sizeof (*mi_hdl) +
2585 	    (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2586 *mi_hdl_p = NULL;
2587 return (ibt_status);
2588 }
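
/*
 * Illustrative sketch (hypothetical): mapping a two-element kernel iovec
 * for a send. Zero-length entries are skipped, exactly as in the loop
 * above; "my_sgl" is a caller-provided ibt_wr_ds_t array of iov_wr_nds
 * entries and MY_MAX_SGL a hypothetical bound.
 *
 *	ibt_iov_attr_t iov_attr;
 *	ibc_mi_hdl_t mi_hdl;
 *
 *	bzero(&iov_attr, sizeof (iov_attr));
 *	iov_attr.iov = my_iov;			// two ibt_iov_t entries
 *	iov_attr.iov_list_len = 2;
 *	iov_attr.iov_wr_nds = MY_MAX_SGL;
 *	iov_attr.iov_as = NULL;			// kernel addresses
 *	wr.send.wr_sgl = my_sgl;
 *	status = hermon_ci_map_mem_iov(hca, &iov_attr, &wr, &mi_hdl);
 */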
2589
2590 /*
2591 * hermon_ci_unmap_mem_iov()
2592 * Unmap the memory
2593 * Context: Can be called from interrupt or base context.
2594 */
2595 static ibt_status_t
2596 hermon_ci_unmap_mem_iov(ibc_hca_hdl_t hca, ibc_mi_hdl_t mi_hdl)
2597 {
2598 int status, i;
2599 hermon_state_t *state;
2600
2601 state = (hermon_state_t *)hca;
2602
2603 for (i = mi_hdl->imh_len; --i >= 0; ) {
2604 status = ddi_dma_unbind_handle(mi_hdl->imh_dmahandle[i]);
2605 if (status != DDI_SUCCESS)
2606 HERMON_WARNING(state, "failed to unbind DMA mapping");
2607 ddi_dma_free_handle(&mi_hdl->imh_dmahandle[i]);
2608 }
2609 kmem_free(mi_hdl, sizeof (*mi_hdl) +
2610 (mi_hdl->imh_len - 1) * sizeof (ddi_dma_handle_t));
2611 return (IBT_SUCCESS);
2612 }
2613
2614 /*
2615 * hermon_ci_alloc_lkey()
2616 * Allocate an empty memory region for use with FRWR.
2617 * Context: Can be called from user or base context.
2618 */
2619 /* ARGSUSED */
2620 static ibt_status_t
2621 hermon_ci_alloc_lkey(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2622 ibt_lkey_flags_t flags, uint_t list_sz, ibc_mr_hdl_t *mr_p,
2623 ibt_pmr_desc_t *mem_desc_p)
2624 {
2625 hermon_state_t *state;
2626 hermon_pdhdl_t pdhdl;
2627 hermon_mrhdl_t mrhdl;
2628 int status;
2629
2630 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2631
2632 ASSERT(mr_p != NULL);
2633 ASSERT(mem_desc_p != NULL);
2634
2635 state = (hermon_state_t *)hca;
2636 pdhdl = (hermon_pdhdl_t)pd;
2637
2638 if (!(state->hs_ibtfinfo.hca_attr->hca_flags2 & IBT_HCA2_MEM_MGT_EXT))
2639 return (IBT_NOT_SUPPORTED);
2640
2641 status = hermon_mr_alloc_lkey(state, pdhdl, flags, list_sz, &mrhdl);
2642 if (status != DDI_SUCCESS) {
2643 return (status);
2644 }
2645 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mrhdl))
2646
2647 /* Fill in the mem_desc_p structure */
2648 mem_desc_p->pmd_iova = 0;
2649 mem_desc_p->pmd_phys_buf_list_sz = list_sz;
2650 mem_desc_p->pmd_lkey = mrhdl->mr_lkey;
2651 /* Only set RKey if remote access was requested */
2652 if (flags & IBT_KEY_REMOTE) {
2653 mem_desc_p->pmd_rkey = mrhdl->mr_rkey;
2654 }
2655 mem_desc_p->pmd_sync_required = B_FALSE;
2656
2657 /* Return the Hermon MR handle */
2658 *mr_p = (ibc_mr_hdl_t)mrhdl;
2659 return (IBT_SUCCESS);
2660 }
2661
2662 /* Physical Register Memory Region */
2663 /*
2664 * hermon_ci_register_physical_mr()
2665 */
2666 /* ARGSUSED */
2667 static ibt_status_t
2668 hermon_ci_register_physical_mr(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2669 ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved, ibc_mr_hdl_t *mr_p,
2670 ibt_pmr_desc_t *mem_desc_p)
2671 {
2672 return (IBT_NOT_SUPPORTED);
2673 }
2674
2675 /*
2676 * hermon_ci_reregister_physical_mr()
2677 */
2678 /* ARGSUSED */
2679 static ibt_status_t
2680 hermon_ci_reregister_physical_mr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr,
2681 ibc_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattrs, void *ibtl_reserved,
2682 ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mr_desc_p)
2683 {
2684 return (IBT_NOT_SUPPORTED);
2685 }
2686
2687 /* Mellanox FMR Support */
2688 /*
2689 * hermon_ci_create_fmr_pool()
2690 * Creates a pool of memory regions suitable for FMR registration
2691 * Context: Can be called from base context only
2692 */
2693 static ibt_status_t
2694 hermon_ci_create_fmr_pool(ibc_hca_hdl_t hca, ibc_pd_hdl_t pd,
2695 ibt_fmr_pool_attr_t *params, ibc_fmr_pool_hdl_t *fmr_pool_p)
2696 {
2697 hermon_state_t *state;
2698 hermon_pdhdl_t pdhdl;
2699 hermon_fmrhdl_t fmrpoolhdl;
2700 int status;
2701
2702 state = (hermon_state_t *)hca;
2703
2704 /* Check for valid PD handle pointer */
2705 if (pd == NULL) {
2706 return (IBT_PD_HDL_INVALID);
2707 }
2708
2709 pdhdl = (hermon_pdhdl_t)pd;
2710
2711 /*
2712 * Validate the access flags. Both Remote Write and Remote Atomic
2713 * require the Local Write flag to be set
2714 */
2715 if (((params->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE) ||
2716 (params->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)) &&
2717 !(params->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)) {
2718 return (IBT_MR_ACCESS_REQ_INVALID);
2719 }
2720
2721 status = hermon_create_fmr_pool(state, pdhdl, params, &fmrpoolhdl);
2722 if (status != DDI_SUCCESS) {
2723 return (status);
2724 }
2725
2726 /* Set fmr_pool from hermon handle */
2727 *fmr_pool_p = (ibc_fmr_pool_hdl_t)fmrpoolhdl;
2728
2729 return (IBT_SUCCESS);
2730 }
2731
2732 /*
2733 * hermon_ci_destroy_fmr_pool()
2734 * Free all resources associated with an FMR pool.
2735 * Context: Can be called from base context only.
2736 */
2737 static ibt_status_t
2738 hermon_ci_destroy_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2739 {
2740 hermon_state_t *state;
2741 hermon_fmrhdl_t fmrpoolhdl;
2742 int status;
2743
2744 state = (hermon_state_t *)hca;
2745 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2746
2747 status = hermon_destroy_fmr_pool(state, fmrpoolhdl);
2748 return (status);
2749 }
2750
2751 /*
2752 * hermon_ci_flush_fmr_pool()
2753 * Force a flush of the memory tables, cleaning up used FMR resources.
2754 * Context: Can be called from interrupt or base context.
2755 */
2756 static ibt_status_t
2757 hermon_ci_flush_fmr_pool(ibc_hca_hdl_t hca, ibc_fmr_pool_hdl_t fmr_pool)
2758 {
2759 hermon_state_t *state;
2760 hermon_fmrhdl_t fmrpoolhdl;
2761 int status;
2762
2763 state = (hermon_state_t *)hca;
2764
2765 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2766 status = hermon_flush_fmr_pool(state, fmrpoolhdl);
2767 return (status);
2768 }
2769
2770 /*
2771 * hermon_ci_register_physical_fmr()
2772  *    From the 'pool' of FMR regions passed in, performs the register
2773  *    physical operation.
2774 * Context: Can be called from interrupt or base context.
2775 */
2776 /* ARGSUSED */
2777 static ibt_status_t
2778 hermon_ci_register_physical_fmr(ibc_hca_hdl_t hca,
2779 ibc_fmr_pool_hdl_t fmr_pool, ibt_pmr_attr_t *mem_pattr,
2780 void *ibtl_reserved, ibc_mr_hdl_t *mr_p, ibt_pmr_desc_t *mem_desc_p)
2781 {
2782 hermon_state_t *state;
2783 hermon_mrhdl_t mrhdl;
2784 hermon_fmrhdl_t fmrpoolhdl;
2785 int status;
2786
2787 ASSERT(mem_pattr != NULL);
2788 ASSERT(mr_p != NULL);
2789 ASSERT(mem_desc_p != NULL);
2790
2791 /* Grab the Hermon softstate pointer */
2792 state = (hermon_state_t *)hca;
2793
2794 fmrpoolhdl = (hermon_fmrhdl_t)fmr_pool;
2795
2796 status = hermon_register_physical_fmr(state, fmrpoolhdl, mem_pattr,
2797 &mrhdl, mem_desc_p);
2798 if (status != DDI_SUCCESS) {
2799 return (status);
2800 }
2801
2802 /*
2803 	 * If the region is mapped for streaming (i.e. noncoherent), then
2804 	 * note that a sync is required
2805 */
2806 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mem_desc_p))
2807 mem_desc_p->pmd_sync_required = (mrhdl->mr_bindinfo.bi_flags &
2808 IBT_MR_NONCOHERENT) ? B_TRUE : B_FALSE;
2809 if (mem_desc_p->pmd_sync_required == B_TRUE) {
2810 /* Fill in DMA handle for future sync operations */
2811 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(mrhdl->mr_bindinfo))
2812 mrhdl->mr_bindinfo.bi_dmahdl =
2813 (ddi_dma_handle_t)mem_pattr->pmr_ma;
2814 }
2815
2816 /* Return the Hermon MR handle */
2817 *mr_p = (ibc_mr_hdl_t)mrhdl;
2818
2819 return (IBT_SUCCESS);
2820 }
2821
2822 /*
2823 * hermon_ci_deregister_fmr()
2824 * Moves an FMR (specified by 'mr') to the deregistered state.
2825 * Context: Can be called from base context only.
2826 */
2827 static ibt_status_t
2828 hermon_ci_deregister_fmr(ibc_hca_hdl_t hca, ibc_mr_hdl_t mr)
2829 {
2830 hermon_state_t *state;
2831 hermon_mrhdl_t mrhdl;
2832 int status;
2833
2834 /* Grab the Hermon softstate pointer */
2835 state = (hermon_state_t *)hca;
2836 mrhdl = (hermon_mrhdl_t)mr;
2837
2838 /*
2839 * Deregister the memory region, either "unmap" the FMR or deregister
2840 * the normal memory region.
2841 */
2842 status = hermon_deregister_fmr(state, mrhdl);
2843 return (status);
2844 }
2845
2846 static int
2847 hermon_mem_alloc(hermon_state_t *state, size_t size, ibt_mr_flags_t flags,
2848 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_hdl)
2849 {
2850 ddi_dma_handle_t dma_hdl;
2851 ddi_dma_attr_t dma_attr;
2852 ddi_acc_handle_t acc_hdl;
2853 size_t real_len;
2854 int status;
2855 int (*ddi_cb)(caddr_t);
2856 ibc_mem_alloc_hdl_t mem_alloc_hdl;
2857
2858 hermon_dma_attr_init(state, &dma_attr);
2859
2860 ddi_cb = (flags & IBT_MR_NOSLEEP) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
2861
2862 /* Allocate a DMA handle */
2863 status = ddi_dma_alloc_handle(state->hs_dip, &dma_attr, ddi_cb,
2864 NULL, &dma_hdl);
2865 if (status != DDI_SUCCESS) {
2866 return (DDI_FAILURE);
2867 }
2868
2869 /* Allocate DMA memory */
2870 status = ddi_dma_mem_alloc(dma_hdl, size,
2871 &state->hs_reg_accattr, DDI_DMA_CONSISTENT, ddi_cb,
2872 NULL, kaddrp, &real_len, &acc_hdl);
2873 if (status != DDI_SUCCESS) {
2874 ddi_dma_free_handle(&dma_hdl);
2875 return (DDI_FAILURE);
2876 }
2877
2878 /* Package the hermon_dma_info contents and return */
2879 mem_alloc_hdl = kmem_alloc(sizeof (**mem_hdl),
2880 (flags & IBT_MR_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP);
2881 if (mem_alloc_hdl == NULL) {
2882 ddi_dma_mem_free(&acc_hdl);
2883 ddi_dma_free_handle(&dma_hdl);
2884 return (DDI_FAILURE);
2885 }
2886 mem_alloc_hdl->ibc_dma_hdl = dma_hdl;
2887 mem_alloc_hdl->ibc_acc_hdl = acc_hdl;
2888
2889 *mem_hdl = mem_alloc_hdl;
2890
2891 return (DDI_SUCCESS);
2892 }
2893
2894 /*
2895 * hermon_ci_alloc_io_mem()
2896  *    Allocate DMA-able memory
2897  */
2899 static ibt_status_t
2900 hermon_ci_alloc_io_mem(ibc_hca_hdl_t hca, size_t size, ibt_mr_flags_t mr_flag,
2901 caddr_t *kaddrp, ibc_mem_alloc_hdl_t *mem_alloc_hdl_p)
2902 {
2903 hermon_state_t *state;
2904 int status;
2905
2906 /* Grab the Hermon softstate pointer and mem handle */
2907 state = (hermon_state_t *)hca;
2908
2909 /* Allocate the memory and handles */
2910 status = hermon_mem_alloc(state, size, mr_flag, kaddrp,
2911 mem_alloc_hdl_p);
2912
2913 if (status != DDI_SUCCESS) {
2914 *mem_alloc_hdl_p = NULL;
2915 *kaddrp = NULL;
2916 return (status);
2917 }
2918
2919 return (IBT_SUCCESS);
2920 }
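
/*
 * Illustrative pairing (hypothetical): memory from alloc_io_mem() is
 * eventually returned through free_io_mem(), which also unbinds any DMA
 * binding established on ibc_dma_hdl.
 *
 *	caddr_t kaddr;
 *	ibc_mem_alloc_hdl_t mh;
 *
 *	if (hermon_ci_alloc_io_mem(hca, size, IBT_MR_SLEEP, &kaddr,
 *	    &mh) == IBT_SUCCESS) {
 *		// ... set up DMA to/from kaddr ...
 *		(void) hermon_ci_free_io_mem(hca, mh);
 *	}
 */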
2921
2922
2923 /*
2924 * hermon_ci_free_io_mem()
2925  *    Unbind the handle and free the memory
2926 */
2927 /* ARGSUSED */
2928 static ibt_status_t
2929 hermon_ci_free_io_mem(ibc_hca_hdl_t hca, ibc_mem_alloc_hdl_t mem_alloc_hdl)
2930 {
2931 /* Unbind the handles and free the memory */
2932 (void) ddi_dma_unbind_handle(mem_alloc_hdl->ibc_dma_hdl);
2933 ddi_dma_mem_free(&mem_alloc_hdl->ibc_acc_hdl);
2934 ddi_dma_free_handle(&mem_alloc_hdl->ibc_dma_hdl);
2935 kmem_free(mem_alloc_hdl, sizeof (*mem_alloc_hdl));
2936
2937 return (IBT_SUCCESS);
2938 }
2939