/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */


#include <sys/ib/ibtl/impl/ibtl.h>

/*
 * ibtl_mem.c
 *    These routines implement all of the Memory Region verbs and the alloc/
 *    query/free Memory Window verbs at the TI interface.
 */

static char ibtl_mem[] = "ibtl_mem";

/*
 * Function:
 *    ibt_register_mr()
 * Input:
 *    hca_hdl  - HCA Handle.
 *    pd       - Protection Domain Handle.
 *    mem_attr - Requested memory region attributes.
 * Output:
 *    mr_hdl_p - The returned IBT memory region handle.
 *    mem_desc - Returned memory descriptor.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_VA_INVALID
 *    IBT_MR_LEN_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 *    IBT_PD_HDL_INVALID
 *    IBT_INSUFF_RESOURCE
 * Description:
 *    Prepares a virtually addressed memory region for use by a HCA. A
 *    description of the registered memory suitable for use in Work Requests
 *    (WRs) is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mr_attr_t *mem_attr,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
    ib_vaddr_t vaddr;
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_mr(%p, %p, %p)",
        hca_hdl, pd, mem_attr);

    vaddr = mem_attr->mr_vaddr;

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_mr(
        IBTL_HCA2CIHCA(hca_hdl), pd, mem_attr, IBTL_HCA2CLNT(hca_hdl),
        mr_hdl_p, mem_desc);
    if (status == IBT_SUCCESS) {
        mem_desc->md_vaddr = vaddr;
        atomic_inc_32(&hca_hdl->ha_mr_cnt);
    }

    return (status);
}
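
/*
 * Usage sketch (illustrative, not part of this file's API): a client that
 * has already opened the HCA and allocated a PD might register a kernel
 * buffer as below, then use mr_desc.md_lkey/md_rkey in work requests and
 * pair the call with ibt_deregister_mr() on teardown. `buf' and `len' are
 * hypothetical; the ibt_mr_attr_t fields and IBT_MR_* flags are as
 * declared in the IBTF headers.
 *
 *    ibt_mr_attr_t mem_attr;
 *    ibt_mr_hdl_t mr_hdl;
 *    ibt_mr_desc_t mr_desc;
 *
 *    mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
 *    mem_attr.mr_len = (ib_memlen_t)len;
 *    mem_attr.mr_as = NULL;
 *    mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *    status = ibt_register_mr(hca_hdl, pd, &mem_attr, &mr_hdl, &mr_desc);
 */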


/*
 * Function:
 *    ibt_register_buf()
 * Input:
 *    hca_hdl       HCA Handle.
 *    pd            Protection Domain Handle.
 *    mem_bpattr    Memory Registration attributes (IOVA and flags).
 *    bp            A pointer to a buf(9S) struct.
 * Output:
 *    mr_hdl_p      The returned IBT memory region handle.
 *    mem_desc      Returned memory descriptor.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_VA_INVALID
 *    IBT_MR_LEN_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 *    IBT_PD_HDL_INVALID
 *    IBT_INSUFF_RESOURCE
 * Description:
 *    Prepares a memory region described by a buf(9S) struct for use by a
 *    HCA. A description of the registered memory suitable for use in
 *    Work Requests (WRs) is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_buf(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_smr_attr_t *mem_bpattr, struct buf *bp, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_buf(%p, %p, %p, %p)",
        hca_hdl, pd, mem_bpattr, bp);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_buf(
        IBTL_HCA2CIHCA(hca_hdl), pd, mem_bpattr, bp, IBTL_HCA2CLNT(hca_hdl),
        mr_hdl_p, mem_desc);
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca_hdl->ha_mr_cnt);
    }

    return (status);
}


/*
 * Function:
 *    ibt_query_mr()
 * Input:
 *    hca_hdl - HCA Handle.
 *    mr_hdl  - The IBT Memory Region handle.
 * Output:
 *    attr    - The pointer to Memory region attributes structure.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 * Description:
 *    Retrieves information about a specified memory region.
 */
ibt_status_t
ibt_query_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_mr_query_attr_t *attr)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mr(%p, %p)", hca_hdl, mr_hdl);

    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl, attr));
}


/*
 * Function:
 *    ibt_deregister_mr()
 * Input:
 *    hca_hdl - HCA Handle.
 *    mr_hdl  - The IBT Memory Region handle.
 * Output:
 *    none.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 *    IBT_MR_IN_USE
 * Description:
 *    Deregister a memory region: remove it from the HCA translation table
 *    and free all resources associated with the memory region.
 */
ibt_status_t
ibt_deregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_mr(%p, %p)", hca_hdl, mr_hdl);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_deregister_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl);
    if (status == IBT_SUCCESS) {
        atomic_dec_32(&hca_hdl->ha_mr_cnt);
    }
    return (status);
}


/*
 * Function:
 *    ibt_reregister_mr()
 * Input:
 *    hca_hdl  - HCA Handle.
 *    mr_hdl   - The IBT Memory Region handle.
 *    pd       - Optional Protection Domain Handle.
 *    mem_attr - Requested memory region attributes.
 * Output:
 *    mr_hdl_p - The reregistered IBT memory region handle.
 *    mem_desc - Returned memory descriptor for the new memory region.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 *    IBT_MR_VA_INVALID
 *    IBT_MR_LEN_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 *    IBT_PD_HDL_INVALID
 *    IBT_INSUFF_RESOURCE
 *    IBT_MR_IN_USE
 * Description:
 *    Modify the attributes of an existing memory region.
 */
ibt_status_t
ibt_reregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl, ibt_pd_hdl_t pd,
    ibt_mr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
    ibt_status_t status;
    ib_vaddr_t vaddr = mem_attr->mr_vaddr;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_mr(%p, %p, %p, %p)",
        hca_hdl, mr_hdl, pd, mem_attr);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_attr,
        IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

    if (status == IBT_SUCCESS)
        mem_desc->md_vaddr = vaddr;
    else if (!(status == IBT_MR_IN_USE || status == IBT_HCA_HDL_INVALID ||
        status == IBT_MR_HDL_INVALID)) {

        IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_mr: "
            "Re-registration Failed: %d", status);

        /* we lost one memory region resource */
        atomic_dec_32(&hca_hdl->ha_mr_cnt);
    }

    return (status);
}
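
/*
 * Usage sketch (illustrative): moving an existing region into a different
 * protection domain while keeping the same virtual range. `new_pd',
 * `old_vaddr' and `old_len' are hypothetical. Note the accounting above:
 * a failure status other than IBT_MR_IN_USE or an invalid-handle error
 * means the original region has already been consumed.
 *
 *    ibt_mr_attr_t mem_attr;
 *    ibt_mr_hdl_t new_mr_hdl;
 *    ibt_mr_desc_t new_desc;
 *
 *    mem_attr.mr_vaddr = old_vaddr;
 *    mem_attr.mr_len = old_len;
 *    mem_attr.mr_as = NULL;
 *    mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *    status = ibt_reregister_mr(hca_hdl, mr_hdl, new_pd, &mem_attr,
 *        &new_mr_hdl, &new_desc);
 */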


/*
 * Function:
 *    ibt_reregister_buf()
 * Input:
 *    hca_hdl       HCA Handle.
 *    mr_hdl        The IBT Memory Region handle.
 *    pd            Optional Protection Domain Handle.
 *    mem_bpattr    Memory Registration attributes (IOVA and flags).
 *    bp            A pointer to a buf(9S) struct.
 * Output:
 *    mr_hdl_p      The reregistered IBT memory region handle.
 *    mem_desc      Returned memory descriptor for the new memory region.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 *    IBT_MR_VA_INVALID
 *    IBT_MR_LEN_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 *    IBT_PD_HDL_INVALID
 *    IBT_INSUFF_RESOURCE
 *    IBT_MR_IN_USE
 * Description:
 *    Modify the attributes of an existing memory region as described by a
 *    buf(9S) struct for use by a HCA. A description of the registered
 *    memory suitable for use in Work Requests (WRs) is returned in the
 *    ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_reregister_buf(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_bpattr, struct buf *bp,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_buf(%p, %p, %p, %p, %p)",
        hca_hdl, mr_hdl, pd, mem_bpattr, bp);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_buf(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_bpattr, bp,
        IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

    if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
        status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {

        IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_buf: "
            "Re-registration Mem Failed: %d", status);

        /* we lost one memory region resource */
        atomic_dec_32(&hca_hdl->ha_mr_cnt);
    }
    return (status);
}


/*
 * Function:
 *    ibt_register_shared_mr()
 * Input:
 *    hca_hdl   - HCA Handle.
 *    mr_hdl    - The IBT Memory Region handle.
 *    pd        - Protection Domain Handle.
 *    mem_sattr - Requested memory region shared attributes.
 * Output:
 *    mr_hdl_p  - The returned IBT memory region handle.
 *    mem_desc  - Returned memory descriptor for the new memory region.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_INSUFF_RESOURCE
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 *    IBT_PD_HDL_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *    Given an existing memory region, a new memory region associated with
 *    the same physical locations is created.
 */
ibt_status_t
ibt_register_shared_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_sattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_shared_mr(%p, %p, %p, %p)",
        hca_hdl, mr_hdl, pd, mem_sattr);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_shared_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_sattr,
        IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca_hdl->ha_mr_cnt);
    }
    return (status);
}

/*
 * Function:
 *    ibt_sync_mr()
 * Input:
 *    hca_hdl      - HCA Handle.
 *    mr_segments  - A pointer to an array of ibt_mr_sync_t that describes
 *                   the memory regions to sync.
 *    num_segments - The length of the mr_segments array.
 * Output:
 *    NONE
 * Returns:
 *    IBT_SUCCESS
 *    IBT_HCA_HDL_INVALID
 *    IBT_MR_HDL_INVALID
 *    IBT_INVALID_PARAM
 *    IBT_MR_VA_INVALID
 *    IBT_MR_LEN_INVALID
 * Description:
 *    Make memory changes visible to incoming RDMA reads, or make the
 *    effects of an incoming RDMA write visible to the consumer.
 */
ibt_status_t
ibt_sync_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_sync_t *mr_segments,
    size_t num_segments)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_sync_mr(%p, %p, %d)", hca_hdl,
        mr_segments, num_segments);

    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_sync_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_segments, num_segments));
}
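
/*
 * Usage sketch (illustrative): syncing one registered segment so that the
 * local consumer sees data deposited by an incoming RDMA write. The handle
 * and range are assumed to come from an earlier ibt_register_mr() call;
 * IBT_SYNC_READ would be used instead before exposing locally written
 * data to incoming RDMA reads.
 *
 *    ibt_mr_sync_t seg;
 *
 *    seg.ms_handle = mr_hdl;
 *    seg.ms_vaddr = mr_desc.md_vaddr;
 *    seg.ms_len = len;
 *    seg.ms_flags = IBT_SYNC_WRITE;
 *    status = ibt_sync_mr(hca_hdl, &seg, 1);
 */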


/*
 * Function:
 *    ibt_alloc_mw()
 * Input:
 *    hca_hdl  - HCA Handle.
 *    pd       - Protection Domain Handle.
 *    flags    - Memory Window alloc flags.
 * Output:
 *    mw_hdl_p - The returned IBT Memory Window handle.
 *    rkey     - The IBT R_Key handle.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_INSUFF_RESOURCE
 *    IBT_CHAN_HDL_INVALID
 *    IBT_PD_HDL_INVALID
 * Description:
 *    Allocate a memory window from the HCA.
 */
ibt_status_t
ibt_alloc_mw(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibt_mw_hdl_t *mw_hdl_p, ibt_rkey_t *rkey)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_mw(%p, %p, 0x%x)",
        hca_hdl, pd, flags);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_mw(
        IBTL_HCA2CIHCA(hca_hdl), pd, flags, mw_hdl_p, rkey);

    /*
     * XXX - We should be able to allocate state and have an IBTF Memory
     * Window Handle. Memory Windows are meant to be rebound on the fly
     * (using a post) to make them fast. It is expected that memory window
     * allocation will be done in a relatively static manner. But we don't
     * have a good reason to keep local MW state at this point, so we won't.
     */
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca_hdl->ha_mw_cnt);
    }
    return (status);
}
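
/*
 * Usage sketch (illustrative): allocating a window and freeing it again.
 * Binding the window to a registered region happens separately, via a
 * bind operation posted on a channel, and is outside the scope of this
 * file.
 *
 *    ibt_mw_hdl_t mw_hdl;
 *    ibt_rkey_t rkey;
 *
 *    if (ibt_alloc_mw(hca_hdl, pd, IBT_MW_SLEEP, &mw_hdl, &rkey) ==
 *        IBT_SUCCESS) {
 *        ... advertise rkey, bind and use the window ...
 *        (void) ibt_free_mw(hca_hdl, mw_hdl);
 *    }
 */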


/*
 * Function:
 *    ibt_query_mw()
 * Input:
 *    hca_hdl - HCA Handle.
 *    mw_hdl  - The IBT Memory Window handle.
 * Output:
 *    pd      - Protection Domain Handle.
 *    rkey    - The IBT R_Key handle.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MW_HDL_INVALID
 * Description:
 *    Retrieves information about a specified memory window.
 */
ibt_status_t
ibt_query_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl,
    ibt_mw_query_attr_t *mw_attr_p)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mw(%p, %p)", hca_hdl, mw_hdl);

    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mw(
        IBTL_HCA2CIHCA(hca_hdl), mw_hdl, mw_attr_p));
}


/*
 * Function:
 *    ibt_free_mw()
 * Input:
 *    hca_hdl - HCA Handle
 *    mw_hdl  - The IBT Memory Window handle.
 * Output:
 *    none.
 * Returns:
 *    IBT_SUCCESS
 *    IBT_CHAN_HDL_INVALID
 *    IBT_MW_HDL_INVALID
 * Description:
 *    De-allocate the Memory Window.
 */
ibt_status_t
ibt_free_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_free_mw(%p, %p)", hca_hdl, mw_hdl);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_mw(
        IBTL_HCA2CIHCA(hca_hdl), mw_hdl);

    if (status == IBT_SUCCESS) {
        atomic_dec_32(&hca_hdl->ha_mw_cnt);
    }
    return (status);
}


/*
 * Function:
 *    ibt_map_mem_area()
 * Input:
 *    hca_hdl         HCA Handle
 *    va_attrs        A pointer to an ibt_va_attr_t that describes the
 *                    VA to be translated.
 *    paddr_list_len  The maximum number of physical buffer entries the
 *                    caller's 'reg_req' can accept.
 * Output:
 *    reg_req         Register request (caller-allocated), in which the
 *                    physical buffers that map the virtual buffer are
 *                    returned.
 *    ma_hdl_p        Memory Area Handle.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Translate a kernel virtual address range into HCA physical addresses.
 *    A set of physical addresses, which can be used with "Reserved L_Key",
 *    register physical, and "Fast Registration Work Request" operations,
 *    is returned.
 */
ibt_status_t
ibt_map_mem_area(ibt_hca_hdl_t hca_hdl, ibt_va_attr_t *va_attrs,
    uint_t paddr_list_len, ibt_reg_req_t *reg_req, ibt_ma_hdl_t *ma_hdl_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_area(%p, %p, %d)",
        hca_hdl, va_attrs, paddr_list_len);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_area(
        IBTL_HCA2CIHCA(hca_hdl), va_attrs,
        NULL, /* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
        paddr_list_len, reg_req, ma_hdl_p);
    /* Not doing reference counting, which adversely affects performance */

    return (status);
}


/*
 * Function:
 *    ibt_unmap_mem_area()
 * Input:
 *    hca_hdl  HCA Handle
 *    ma_hdl   Memory Area Handle.
 * Output:
 *    None.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Unpin the physical pages that were pinned during an
 *    ibt_map_mem_area() call.
 */
ibt_status_t
ibt_unmap_mem_area(ibt_hca_hdl_t hca_hdl, ibt_ma_hdl_t ma_hdl)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_area(%p, %p)",
        hca_hdl, ma_hdl);

    status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_area(
        IBTL_HCA2CIHCA(hca_hdl), ma_hdl));
    /* Not doing reference counting, which adversely affects performance */

    return (status);
}

/*
 * Function:
 *    ibt_map_mem_iov()
 * Input:
 *    hca_hdl   HCA Handle
 *    iov_attr  A pointer to an ibt_iov_attr_t that describes the
 *              virtual ranges to be translated.
 * Output:
 *    wr        A pointer to the work request where the output
 *              sgl (reserved_lkey, size, paddr) will be written.
 *    mi_hdl_p  Memory IOV Handle.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Translate an array of virtual address ranges into HCA physical
 *    addresses, sizes, and reserved_lkey.
 */
ibt_status_t
ibt_map_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_iov_attr_t *iov_attr,
    ibt_all_wr_t *wr, ibt_mi_hdl_t *mi_hdl_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_iov(%p, %p, %p)",
        hca_hdl, iov_attr, wr);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_iov(
        IBTL_HCA2CIHCA(hca_hdl), iov_attr, wr, mi_hdl_p);
    /* Not doing reference counting, which adversely affects performance */

    return (status);
}


/*
 * Function:
 *    ibt_unmap_mem_iov()
 * Input:
 *    hca_hdl  HCA Handle
 *    mi_hdl   Memory IOV Handle.
 * Output:
 *    None.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Unpin the physical pages that were pinned during an
 *    ibt_map_mem_iov() call.
 */
ibt_status_t
ibt_unmap_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_mi_hdl_t mi_hdl)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_iov(%p, %p)",
        hca_hdl, mi_hdl);

    status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_iov(
        IBTL_HCA2CIHCA(hca_hdl), mi_hdl));
    /* Not doing reference counting, which adversely affects performance */

    return (status);
}

/*
 * Function:
 *    ibt_alloc_io_mem()
 * Input:
 *    hca_hdl        HCA Handle
 *    size           Number of bytes to allocate
 *    mr_flag        Possible values: IBT_MR_SLEEP, IBT_MR_NONCOHERENT
 * Output:
 *    kaddrp         Contains a pointer to the virtual address of the
 *                   memory allocated by this call. (Set to NULL if
 *                   memory allocation fails.)
 *    mem_alloc_hdl  Memory access handle returned by ibt_alloc_io_mem().
 * Returns:
 *    IBT_SUCCESS
 *    IBT_INSUFF_RESOURCE
 *    IBT_HCA_HDL_INVALID
 *    IBT_MR_ACCESS_REQ_INVALID
 *    IBT_INVALID_PARAM
 * Description:
 *    Wrapper for ddi_dma_mem_alloc()
 */
ibt_status_t
ibt_alloc_io_mem(ibt_hca_hdl_t hca_hdl, size_t size, ibt_mr_flags_t mr_flag,
    caddr_t *kaddrp, ibt_mem_alloc_hdl_t *mem_alloc_hdl)
{
    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_io_mem(
        IBTL_HCA2CIHCA(hca_hdl), size, mr_flag, kaddrp,
        (ibc_mem_alloc_hdl_t *)mem_alloc_hdl));
}

/*
 * Function:
 *    ibt_free_io_mem()
 * Input:
 *    hca_hdl        HCA Handle
 *    mem_alloc_hdl  Memory access handle returned by ibt_alloc_io_mem().
 * Output:
 *    None
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Wrapper for ddi_dma_mem_free()
 */
ibt_status_t
ibt_free_io_mem(ibt_hca_hdl_t hca_hdl, ibt_mem_alloc_hdl_t mem_alloc_hdl)
{
    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_io_mem(
        IBTL_HCA2CIHCA(hca_hdl), (ibc_mem_alloc_hdl_t)mem_alloc_hdl));
}
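
/*
 * Usage sketch (illustrative): allocating DMA-able I/O memory and freeing
 * it again. The size and flag values are arbitrary.
 *
 *    caddr_t kaddr;
 *    ibt_mem_alloc_hdl_t mem_hdl;
 *
 *    if (ibt_alloc_io_mem(hca_hdl, 8192, IBT_MR_SLEEP, &kaddr,
 *        &mem_hdl) == IBT_SUCCESS) {
 *        ... use kaddr for I/O ...
 *        (void) ibt_free_io_mem(hca_hdl, mem_hdl);
 *    }
 */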

/*
 * Function:
 *    ibt_alloc_lkey()
 * Input:
 *    hca_hdl           HCA Handle
 *    pd                A protection domain handle.
 *    flags             Access control.
 *    phys_buf_list_sz  Requested size of Physical Buffer List (PBL)
 *                      resources to be allocated.
 * Output:
 *    mr_hdl_p          The returned IBT memory region handle.
 *    mem_desc_p        Returned memory descriptor.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Allocates physical buffer list resources for use in memory
 *    registrations.
 */
ibt_status_t
ibt_alloc_lkey(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_lkey_flags_t flags,
    uint_t phys_buf_list_sz, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_lkey(%p, %p, 0x%X, %d)",
        hca_hdl, pd, flags, phys_buf_list_sz);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_lkey(
        IBTL_HCA2CIHCA(hca_hdl), pd, flags, phys_buf_list_sz, mr_hdl_p,
        mem_desc_p);
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca_hdl->ha_mr_cnt);
    }

    return (status);
}


/*
 * Function:
 *    ibt_register_phys_mr()
 * Input:
 *    hca_hdl     HCA Handle
 *    pd          A protection domain handle.
 *    mem_pattr   Requested memory region physical attributes.
 * Output:
 *    mr_hdl_p    The returned IBT memory region handle.
 *    mem_desc_p  Returned memory descriptor.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Prepares a physically addressed memory region for use by a HCA.
 */
ibt_status_t
ibt_register_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_phys_mr(%p, %p, %p)",
        hca_hdl, pd, mem_pattr);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_mr(
        IBTL_HCA2CIHCA(hca_hdl), pd, mem_pattr,
        NULL, /* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
        mr_hdl_p, mem_desc_p);
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca_hdl->ha_mr_cnt);
    }

    return (status);
}


/*
 * Function:
 *    ibt_reregister_phys_mr()
 * Input:
 *    hca_hdl     HCA Handle
 *    mr_hdl      The IBT memory region handle.
 *    pd          A protection domain handle.
 *    mem_pattr   Requested memory region physical attributes.
 * Output:
 *    mr_hdl_p    The returned IBT memory region handle.
 *    mem_desc_p  Returned memory descriptor.
 * Returns:
 *    IBT_SUCCESS
 * Description:
 *    Modifies the attributes of an existing physically addressed memory
 *    region.
 */
ibt_status_t
ibt_reregister_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_phys_mr(%p, %p, %p, %p)",
        hca_hdl, mr_hdl, pd, mem_pattr);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_physical_mr(
        IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_pattr,
        NULL, /* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
        mr_hdl_p, mem_desc_p);

    if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
        status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {
        IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_phys_mr: "
            "Re-registration Mem Failed: %d", status);

        /* we lost one memory region resource */
        atomic_dec_32(&hca_hdl->ha_mr_cnt);
    }
    return (status);
}


/*
 * Fast Memory Registration (FMR).
 *
 * ibt_create_fmr_pool
 *    Not fast-path.
 *    ibt_create_fmr_pool() verifies that the HCA supports FMR and allocates
 *    and initializes an "FMR pool". This pool contains state specific to
 *    this registration, including the watermark setting to determine when
 *    to sync, and the total number of FMR regions available within this
 *    pool.
 */
ibt_status_t
ibt_create_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params, ibt_fmr_pool_hdl_t *fmr_pool_p)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_create_fmr_pool(%p, %p, %p)",
        hca_hdl, pd, fmr_params);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_create_fmr_pool(
        IBTL_HCA2CIHCA(hca_hdl), pd, fmr_params, fmr_pool_p);
    if (status != IBT_SUCCESS) {
        *fmr_pool_p = NULL;
        return (status);
    }

    /* Update the FMR resource count */
    atomic_inc_32(&hca_hdl->ha_fmr_pool_cnt);

    return (status);
}


/*
 * ibt_destroy_fmr_pool
 *    ibt_destroy_fmr_pool() deallocates all of the FMR regions in a
 *    specific pool. All state and information regarding the pool are
 *    destroyed and returned as free space once again. No further use of
 *    FMR regions in this pool is possible without a subsequent call to
 *    ibt_create_fmr_pool().
 */
ibt_status_t
ibt_destroy_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_destroy_fmr_pool(%p, %p)",
        hca_hdl, fmr_pool);

    status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_destroy_fmr_pool(
        IBTL_HCA2CIHCA(hca_hdl), fmr_pool);
    if (status != IBT_SUCCESS) {
        IBTF_DPRINTF_L2(ibtl_mem, "ibt_destroy_fmr_pool: "
            "CI FMR Pool destroy failed (%d)", status);
        return (status);
    }

    atomic_dec_32(&hca_hdl->ha_fmr_pool_cnt);

    return (status);
}

/*
 * ibt_flush_fmr_pool
 *    ibt_flush_fmr_pool() forces a flush to occur. At the client's request,
 *    any unmapped FMR regions (see ibt_deregister_fmr()) are returned to a
 *    free state. This function allows for an asynchronous cleanup of
 *    formerly used FMR regions. A sync operation is also performed
 *    internally by the HCA driver when the 'watermark' setting for the
 *    number of free FMR regions left in the pool is reached.
 */
ibt_status_t
ibt_flush_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_flush_fmr_pool(%p, %p)",
        hca_hdl, fmr_pool);

    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_flush_fmr_pool(
        IBTL_HCA2CIHCA(hca_hdl), fmr_pool));
}

/*
 * ibt_register_physical_fmr
 *    ibt_register_physical_fmr() assigns a "free" entry from the FMR Pool.
 *    It first consults the "FMR cache" to see if this is a duplicate memory
 *    registration to something already in use. If not, then a free entry
 *    in the "pool" is marked used.
 */
ibt_status_t
ibt_register_physical_fmr(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_physical_fmr(%p, %p, %p, %p)",
        hca_hdl, fmr_pool, mem_pattr, mem_desc_p);

    return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_fmr(
        IBTL_HCA2CIHCA(hca_hdl), fmr_pool, mem_pattr,
        NULL, /* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
        mr_hdl_p, mem_desc_p));
}

/*
 * ibt_deregister_fmr
 *    ibt_deregister_fmr() unmaps the resources reserved from the FMR
 *    pool by ibt_register_physical_fmr(), and marks the region as free
 *    in the FMR Pool.
 */
ibt_status_t
ibt_deregister_fmr(ibt_hca_hdl_t hca, ibt_mr_hdl_t mr_hdl)
{
    IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_fmr(%p, %p)", hca, mr_hdl);

    return (IBTL_HCA2CIHCAOPS_P(hca)->ibc_deregister_fmr(
        IBTL_HCA2CIHCA(hca), mr_hdl));
}
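
/*
 * Usage sketch (illustrative) of the FMR life cycle. The pool attribute
 * values are arbitrary and the ibt_fmr_pool_attr_t field names shown are
 * assumptions based on the IBTF headers; the ibt_pmr_attr_t setup that
 * describes the physical pages is elided.
 *
 *    ibt_fmr_pool_attr_t fmr_attr;
 *    ibt_fmr_pool_hdl_t pool;
 *    ibt_mr_hdl_t fmr_hdl;
 *    ibt_pmr_desc_t pmr_desc;
 *
 *    bzero(&fmr_attr, sizeof (fmr_attr));
 *    fmr_attr.fmr_pool_size = 512;
 *    fmr_attr.fmr_dirty_watermark = 32;
 *    fmr_attr.fmr_max_pages_per_fmr = 16;
 *    fmr_attr.fmr_flags = IBT_MR_SLEEP;
 *    status = ibt_create_fmr_pool(hca_hdl, pd, &fmr_attr, &pool);
 *
 *    status = ibt_register_physical_fmr(hca_hdl, pool, &mem_pattr,
 *        &fmr_hdl, &pmr_desc);
 *    ... I/O using pmr_desc ...
 *    status = ibt_deregister_fmr(hca_hdl, fmr_hdl);
 *    status = ibt_destroy_fmr_pool(hca_hdl, pool);
 */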

/*
 * ibt_register_dma_mr
 */
ibt_status_t
ibt_register_dma_mr(ibt_hca_hdl_t hca, ibt_pd_hdl_t pd,
    ibt_dmr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
    ibt_status_t status;

    IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_dma_mr(%p, %p, %p)",
        hca, pd, mem_attr);

    status = IBTL_HCA2CIHCAOPS_P(hca)->ibc_register_dma_mr(
        IBTL_HCA2CIHCA(hca), pd, mem_attr, NULL, mr_hdl_p, mem_desc);
    if (status == IBT_SUCCESS) {
        atomic_inc_32(&hca->ha_mr_cnt);
    }
    return (status);
}
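
/*
 * Usage sketch (illustrative): registering a region described by a DMA
 * (I/O bus) address. The ibt_dmr_attr_t field names are assumptions based
 * on the IBTF headers; `ioaddr' and `len' are hypothetical values, e.g.
 * obtained from DDI DMA binding.
 *
 *    ibt_dmr_attr_t dmr_attr;
 *    ibt_mr_hdl_t mr_hdl;
 *    ibt_mr_desc_t mr_desc;
 *
 *    dmr_attr.dmr_paddr = ioaddr;
 *    dmr_attr.dmr_len = (ib_memlen_t)len;
 *    dmr_attr.dmr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *    status = ibt_register_dma_mr(hca_hdl, pd, &dmr_attr, &mr_hdl,
 *        &mr_desc);
 */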