/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/ib/ibtl/impl/ibtl.h>

/*
 * ibtl_mem.c
 *	These routines implement all of the Memory Region verbs and the
 *	alloc/query/free Memory Window verbs at the TI interface.
 */

static char ibtl_mem[] = "ibtl_mem";

/*
 * Function:
 *	ibt_register_mr()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	pd       - Protection Domain Handle.
 *	mem_attr - Requested memory region attributes.
 * Output:
 *	mr_hdl_p - The returned IBT memory region handle.
 *	mem_desc - Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Prepares a virtually addressed memory region for use by an HCA. A
 *	description of the registered memory, suitable for use in Work
 *	Requests (WRs), is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mr_attr_t *mem_attr,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
        ib_vaddr_t	vaddr;
        ibt_status_t	status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_mr(%p, %p, %p)",
            hca_hdl, pd, mem_attr);

        vaddr = mem_attr->mr_vaddr;

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_mr(
            IBTL_HCA2CIHCA(hca_hdl), pd, mem_attr, IBTL_HCA2CLNT(hca_hdl),
            mr_hdl_p, mem_desc);
        if (status == IBT_SUCCESS) {
                mem_desc->md_vaddr = vaddr;
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}
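
/*
 * Illustrative usage sketch (not part of the original TI code): how a
 * client might register a kernel buffer with ibt_register_mr().  The
 * guard macro and helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static ibt_status_t
example_register_buffer(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    caddr_t buf, size_t len, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mr_desc_p)
{
        ibt_mr_attr_t mem_attr;

        mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
        mem_attr.mr_len = (ib_memlen_t)len;
        mem_attr.mr_as = NULL;	/* kernel virtual address */
        mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;

        /* On success, mr_desc_p->md_lkey/md_rkey are usable in WRs */
        return (ibt_register_mr(hca_hdl, pd, &mem_attr, mr_hdl_p, mr_desc_p));
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */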

/*
 * Function:
 *	ibt_register_buf()
 * Input:
 *	hca_hdl     HCA Handle.
 *	pd          Protection Domain Handle.
 *	mem_bpattr  Memory Registration attributes (IOVA and flags).
 *	bp          A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p    The returned IBT memory region handle.
 *	mem_desc    Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Prepares a memory region described by a buf(9S) struct for use by
 *	an HCA. A description of the registered memory, suitable for use in
 *	Work Requests (WRs), is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_buf(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_smr_attr_t *mem_bpattr, struct buf *bp, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_buf(%p, %p, %p, %p)",
            hca_hdl, pd, mem_bpattr, bp);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_buf(
            IBTL_HCA2CIHCA(hca_hdl), pd, mem_bpattr, bp, IBTL_HCA2CLNT(hca_hdl),
            mr_hdl_p, mem_desc);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}


/*
 * Function:
 *	ibt_query_mr()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mr_hdl  - The IBT Memory Region handle.
 * Output:
 *	attr    - A pointer to a memory region attributes structure.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory region.
 */
ibt_status_t
ibt_query_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_mr_query_attr_t *attr)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mr(%p, %p)", hca_hdl, mr_hdl);

        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl, attr));
}


/*
 * Function:
 *	ibt_deregister_mr()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mr_hdl  - The IBT Memory Region handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_IN_USE
 * Description:
 *	Deregisters the registered memory region: removes the memory region
 *	from the HCA translation table and frees all resources associated
 *	with it.
 */
ibt_status_t
ibt_deregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_mr(%p, %p)", hca_hdl, mr_hdl);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_deregister_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}
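
/*
 * Illustrative sketch (not part of the original TI code): query a
 * region's attributes and then tear the region down.  The guard macro
 * and helper name are hypothetical; the handles are assumed to come
 * from a prior ibt_register_mr() call.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static void
example_query_and_deregister(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl)
{
        ibt_mr_query_attr_t attr;

        if (ibt_query_mr(hca_hdl, mr_hdl, &attr) == IBT_SUCCESS) {
                /* attr now describes the region (keys, bounds, flags) */
                IBTF_DPRINTF_L3(ibtl_mem, "example: queried mr %p", mr_hdl);
        }
        (void) ibt_deregister_mr(hca_hdl, mr_hdl);
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */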

/*
 * Function:
 *	ibt_reregister_mr()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	mr_hdl   - The IBT Memory Region handle.
 *	pd       - Optional Protection Domain Handle.
 *	mem_attr - Requested memory region attributes.
 * Output:
 *	mr_hdl_p - The reregistered IBT memory region handle.
 *	mem_desc - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 *	IBT_MR_IN_USE
 * Description:
 *	Modifies the attributes of an existing memory region.
 */
ibt_status_t
ibt_reregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl, ibt_pd_hdl_t pd,
    ibt_mr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
        ibt_status_t	status;
        ib_vaddr_t	vaddr = mem_attr->mr_vaddr;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_mr(%p, %p, %p, %p)",
            hca_hdl, mr_hdl, pd, mem_attr);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_attr,
            IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

        if (status == IBT_SUCCESS)
                mem_desc->md_vaddr = vaddr;
        else if (!(status == IBT_MR_IN_USE || status == IBT_HCA_HDL_INVALID ||
            status == IBT_MR_HDL_INVALID)) {

                IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_mr: "
                    "Re-registration Failed: %d", status);

                /* we lost one memory region resource */
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}


/*
 * Function:
 *	ibt_reregister_buf()
 * Input:
 *	hca_hdl     HCA Handle.
 *	mr_hdl      The IBT Memory Region handle.
 *	pd          Optional Protection Domain Handle.
 *	mem_bpattr  Memory Registration attributes (IOVA and flags).
 *	bp          A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p    The reregistered IBT memory region handle.
 *	mem_desc    Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 *	IBT_MR_IN_USE
 * Description:
 *	Modifies the attributes of an existing memory region, as described
 *	by a buf(9S) struct, for use by an HCA. A description of the
 *	registered memory, suitable for use in Work Requests (WRs), is
 *	returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_reregister_buf(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_bpattr, struct buf *bp,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_buf(%p, %p, %p, %p, %p)",
            hca_hdl, mr_hdl, pd, mem_bpattr, bp);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_buf(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_bpattr, bp,
            IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

        if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
            status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {

                IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_buf: "
                    "Re-registration Mem Failed: %d", status);

                /* we lost one memory region resource */
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}
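
/*
 * Illustrative sketch (not part of the original TI code): per the
 * accounting above, a reregistration failure other than IBT_MR_IN_USE
 * or an invalid handle means the old region's resource is gone, so the
 * stale handle must not be reused.  Guard macro and helper name are
 * hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static ibt_status_t
example_reregister(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_attr_t *new_attr, ibt_mr_desc_t *desc_p)
{
        ibt_mr_hdl_t	new_hdl;
        ibt_status_t	status;

        status = ibt_reregister_mr(hca_hdl, *mr_hdl_p, pd, new_attr,
            &new_hdl, desc_p);
        if (status == IBT_SUCCESS)
                *mr_hdl_p = new_hdl;	/* continue with the new handle */
        else if (status != IBT_MR_IN_USE && status != IBT_HCA_HDL_INVALID &&
            status != IBT_MR_HDL_INVALID)
                *mr_hdl_p = NULL;	/* old region resource was lost */
        return (status);
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */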

/*
 * Function:
 *	ibt_register_shared_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 *	pd        - Protection Domain Handle.
 *	mem_sattr - Requested memory region shared attributes.
 * Output:
 *	mr_hdl_p  - The returned IBT memory region handle.
 *	mem_desc  - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *	Given an existing memory region, creates a new memory region
 *	associated with the same physical locations.
 */
ibt_status_t
ibt_register_shared_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_sattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_shared_mr(%p, %p, %p, %p)",
            hca_hdl, mr_hdl, pd, mem_sattr);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_shared_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_sattr,
            IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}

/*
 * Function:
 *	ibt_sync_mr()
 * Input:
 *	hca_hdl      - HCA Handle.
 *	mr_segments  - A pointer to an array of ibt_mr_sync_t that describes
 *		       the memory regions to sync.
 *	num_segments - The length of the mr_segments array.
 * Output:
 *	NONE
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_INVALID_PARAM
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 * Description:
 *	Makes memory changes visible to incoming RDMA reads, or makes the
 *	effects of incoming RDMA writes visible to the consumer.
 */
ibt_status_t
ibt_sync_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_sync_t *mr_segments,
    size_t num_segments)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_sync_mr(%p, %p, %d)", hca_hdl,
            mr_segments, num_segments);

        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_sync_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_segments, num_segments));
}


/*
 * Function:
 *	ibt_alloc_mw()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	pd       - Protection Domain Handle.
 *	flags    - Memory Window alloc flags.
 * Output:
 *	mw_hdl_p - The returned IBT Memory Window handle.
 *	rkey     - The IBT R_Key handle.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_PD_HDL_INVALID
 * Description:
 *	Allocates a memory window from the HCA.
 */
ibt_status_t
ibt_alloc_mw(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibt_mw_hdl_t *mw_hdl_p, ibt_rkey_t *rkey)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_mw(%p, %p, 0x%x)",
            hca_hdl, pd, flags);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_mw(
            IBTL_HCA2CIHCA(hca_hdl), pd, flags, mw_hdl_p, rkey);

        /*
         * XXX - We should be able to allocate state and have an IBTF Memory
         * Window Handle. Memory Windows are meant to be rebound on the fly
         * (using a post) to make them fast. It is expected that memory
         * window allocation will be done in a relatively static manner.
         * But we don't have a good reason to keep local MW state at this
         * point, so we won't.
         */
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mw_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}
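
/*
 * Illustrative sketch (not part of the original TI code) for
 * ibt_sync_mr() above: make CPU stores to a registered region visible
 * to an incoming RDMA read.  IBT_SYNC_READ and the ibt_mr_sync_t field
 * names are taken from the ibti headers but should be treated as
 * assumptions here; guard macro and helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static ibt_status_t
example_sync_for_rdma_read(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ib_vaddr_t vaddr, ib_memlen_t len)
{
        ibt_mr_sync_t sync;

        sync.ms_handle = mr_hdl;
        sync.ms_vaddr = vaddr;
        sync.ms_len = len;
        sync.ms_flags = IBT_SYNC_READ;

        return (ibt_sync_mr(hca_hdl, &sync, 1));
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */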

/*
 * Function:
 *	ibt_query_mw()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mw_hdl    - The IBT Memory Window handle.
 * Output:
 *	mw_attr_p - Returned Memory Window attributes, including the
 *		    Protection Domain handle and current R_Key.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MW_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory window.
 */
ibt_status_t
ibt_query_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl,
    ibt_mw_query_attr_t *mw_attr_p)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mw(%p, %p)", hca_hdl, mw_hdl);

        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mw(
            IBTL_HCA2CIHCA(hca_hdl), mw_hdl, mw_attr_p));
}


/*
 * Function:
 *	ibt_free_mw()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mw_hdl  - The IBT Memory Window handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MW_HDL_INVALID
 * Description:
 *	Deallocates the Memory Window.
 */
ibt_status_t
ibt_free_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_free_mw(%p, %p)", hca_hdl, mw_hdl);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_mw(
            IBTL_HCA2CIHCA(hca_hdl), mw_hdl);

        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mw_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}


/*
 * Function:
 *	ibt_map_mem_area()
 * Input:
 *	hca_hdl         HCA Handle
 *	va_attrs        A pointer to an ibt_va_attr_t that describes the
 *			VA to be translated.
 *	paddr_list_len  The number of entries in the 'paddr_list_p' array.
 * Output:
 *	paddr_list_p    Array of ibt_phys_buf_t (allocated by the caller),
 *			in which the physical buffers that map the virtual
 *			buffer are returned.
 *	num_paddr_p     The actual number of ibt_phys_buf_t that were
 *			returned in the 'paddr_list_p' array.
 *	ma_hdl_p        Memory Area Handle.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Translates a kernel virtual address range into HCA physical
 *	addresses. Returns a set of physical addresses that can be used
 *	with "Reserved L_Key", register physical, and "Fast Registration
 *	Work Request" operations.
 */
ibt_status_t
ibt_map_mem_area(ibt_hca_hdl_t hca_hdl, ibt_va_attr_t *va_attrs,
    uint_t paddr_list_len, ibt_phys_buf_t *paddr_list_p, uint_t *num_paddr_p,
    size_t *paddr_bufsz_p, ib_memlen_t *paddr_offset_p, ibt_ma_hdl_t *ma_hdl_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_area(%p, %p, %d)",
            hca_hdl, va_attrs, paddr_list_len);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_area(
            IBTL_HCA2CIHCA(hca_hdl), va_attrs,
            NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
            paddr_list_len, paddr_list_p, num_paddr_p, paddr_bufsz_p,
            paddr_offset_p, ma_hdl_p);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_ma_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}
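
/*
 * Illustrative sketch (not part of the original TI code) for
 * ibt_map_mem_area(): translate a kernel VA range into a caller-supplied
 * physical buffer list.  The ibt_va_attr_t field names (va_vaddr, va_len,
 * va_as, va_flags) and IBT_VA_SLEEP follow the ibti headers but should be
 * treated as assumptions here; guard macro, helper name, and list length
 * are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
#define	EXAMPLE_PBL_LEN	8
static ibt_status_t
example_map_area(ibt_hca_hdl_t hca_hdl, ib_vaddr_t vaddr, ib_memlen_t len,
    ibt_ma_hdl_t *ma_hdl_p)
{
        ibt_va_attr_t	va_attrs;
        ibt_phys_buf_t	pbl[EXAMPLE_PBL_LEN];
        uint_t		num_paddr;
        size_t		bufsz;
        ib_memlen_t	offset;

        bzero(&va_attrs, sizeof (va_attrs));
        va_attrs.va_vaddr = vaddr;
        va_attrs.va_len = len;
        va_attrs.va_as = NULL;		/* kernel address space */
        va_attrs.va_flags = IBT_VA_SLEEP;

        return (ibt_map_mem_area(hca_hdl, &va_attrs, EXAMPLE_PBL_LEN, pbl,
            &num_paddr, &bufsz, &offset, ma_hdl_p));
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */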

/*
 * Function:
 *	ibt_unmap_mem_area()
 * Input:
 *	hca_hdl  HCA Handle
 *	ma_hdl   Memory Area Handle.
 * Output:
 *	None.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Unpins the physical pages that were pinned by an
 *	ibt_map_mem_area() call.
 */
ibt_status_t
ibt_unmap_mem_area(ibt_hca_hdl_t hca_hdl, ibt_ma_hdl_t ma_hdl)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_area(%p, %p)",
            hca_hdl, ma_hdl);

        status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_area(
            IBTL_HCA2CIHCA(hca_hdl), ma_hdl));
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_ma_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}

/*
 * Function:
 *	ibt_map_mem_iov()
 * Input:
 *	hca_hdl   HCA Handle
 *	iov_attr  A pointer to an ibt_iov_attr_t that describes the
 *		  virtual ranges to be translated.
 * Output:
 *	wr        A pointer to the work request where the output
 *		  sgl (reserved_lkey, size, paddr) will be written.
 *	mi_hdl_p  Memory IOV Handle.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Translates an array of virtual address ranges into HCA physical
 *	addresses, sizes, and reserved_lkey.
 */
ibt_status_t
ibt_map_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_iov_attr_t *iov_attr,
    ibt_all_wr_t *wr, ibt_mi_hdl_t *mi_hdl_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_iov(%p, %p, %p)",
            hca_hdl, iov_attr, wr);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_iov(
            IBTL_HCA2CIHCA(hca_hdl), iov_attr, wr, mi_hdl_p);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_ma_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}


/*
 * Function:
 *	ibt_unmap_mem_iov()
 * Input:
 *	hca_hdl  HCA Handle
 *	mi_hdl   Memory IOV Handle.
 * Output:
 *	None.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Unpins the physical pages that were pinned by an
 *	ibt_map_mem_iov() call.
 */
ibt_status_t
ibt_unmap_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_mi_hdl_t mi_hdl)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_iov(%p, %p)",
            hca_hdl, mi_hdl);

        status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_iov(
            IBTL_HCA2CIHCA(hca_hdl), mi_hdl));
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_ma_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}
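
/*
 * Illustrative sketch (not part of the original TI code): translations
 * obtained from ibt_map_mem_area()/ibt_map_mem_iov() are released with
 * the matching unmap call once the associated I/O has completed.  Guard
 * macro and helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static void
example_unmap_all(ibt_hca_hdl_t hca_hdl, ibt_ma_hdl_t ma_hdl,
    ibt_mi_hdl_t mi_hdl)
{
        (void) ibt_unmap_mem_area(hca_hdl, ma_hdl);
        (void) ibt_unmap_mem_iov(hca_hdl, mi_hdl);
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */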

/*
 * Function:
 *	ibt_alloc_io_mem()
 * Input:
 *	hca_hdl        HCA Handle
 *	size           Number of bytes to allocate
 *	mr_flag        Possible values: IBT_MR_SLEEP, IBT_MR_NONCOHERENT
 * Output:
 *	kaddrp         Contains a pointer to the virtual address of the
 *		       memory allocated by this call. (Set to NULL if
 *		       memory allocation fails.)
 *	mem_alloc_hdl  Memory access handle returned by ibt_alloc_io_mem().
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_INVALID_PARAM
 * Description:
 *	Wrapper for ddi_dma_mem_alloc().
 */
ibt_status_t
ibt_alloc_io_mem(ibt_hca_hdl_t hca_hdl, size_t size, ibt_mr_flags_t mr_flag,
    caddr_t *kaddrp, ibt_mem_alloc_hdl_t *mem_alloc_hdl)
{
        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_io_mem(
            IBTL_HCA2CIHCA(hca_hdl), size, mr_flag, kaddrp,
            (ibc_mem_alloc_hdl_t *)mem_alloc_hdl));
}

/*
 * Function:
 *	ibt_free_io_mem()
 * Input:
 *	hca_hdl        HCA Handle
 *	mem_alloc_hdl  Memory access handle returned by ibt_alloc_io_mem().
 * Output:
 *	None
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Wrapper for ddi_dma_mem_free().
 */
ibt_status_t
ibt_free_io_mem(ibt_hca_hdl_t hca_hdl, ibt_mem_alloc_hdl_t mem_alloc_hdl)
{
        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_io_mem(
            IBTL_HCA2CIHCA(hca_hdl), (ibc_mem_alloc_hdl_t)mem_alloc_hdl));
}

/*
 * Function:
 *	ibt_alloc_lkey()
 * Input:
 *	hca_hdl           HCA Handle
 *	pd                A protection domain handle.
 *	flags             Access control.
 *	phys_buf_list_sz  Requested size of Physical Buffer List (PBL)
 *			  resources to be allocated.
 * Output:
 *	mr_hdl_p          The returned IBT memory region handle.
 *	mem_desc_p        Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Allocates physical buffer list resources for use in memory
 *	registrations.
 */
ibt_status_t
ibt_alloc_lkey(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_lkey_flags_t flags,
    uint_t phys_buf_list_sz, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_lkey(%p, %p, 0x%X, %d)",
            hca_hdl, pd, flags, phys_buf_list_sz);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_lkey(
            IBTL_HCA2CIHCA(hca_hdl), pd, flags, phys_buf_list_sz, mr_hdl_p,
            mem_desc_p);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}


/*
 * Function:
 *	ibt_register_phys_mr()
 * Input:
 *	hca_hdl     HCA Handle
 *	pd          A protection domain handle.
 *	mem_pattr   Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p    The returned IBT memory region handle.
 *	mem_desc_p  Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Prepares a physically addressed memory region for use by an HCA.
 */
ibt_status_t
ibt_register_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_phys_mr(%p, %p, %p)",
            hca_hdl, pd, mem_pattr);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_mr(
            IBTL_HCA2CIHCA(hca_hdl), pd, mem_pattr,
            NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
            mr_hdl_p, mem_desc_p);
        if (status == IBT_SUCCESS) {
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt++;
                mutex_exit(&hca_hdl->ha_mutex);
        }

        return (status);
}
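
/*
 * Illustrative sketch (not part of the original TI code): allocate
 * DMA-able I/O memory through the HCA, use it, and free it again.
 * IBT_MR_SLEEP is one of the mr_flag values documented above; guard
 * macro and helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static void
example_io_mem_roundtrip(ibt_hca_hdl_t hca_hdl, size_t size)
{
        caddr_t			kaddr;
        ibt_mem_alloc_hdl_t	mem_hdl;

        if (ibt_alloc_io_mem(hca_hdl, size, IBT_MR_SLEEP, &kaddr,
            &mem_hdl) == IBT_SUCCESS) {
                bzero(kaddr, size);	/* memory is CPU-addressable */
                (void) ibt_free_io_mem(hca_hdl, mem_hdl);
        }
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */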

/*
 * Function:
 *	ibt_reregister_phys_mr()
 * Input:
 *	hca_hdl     HCA Handle
 *	mr_hdl      The IBT memory region handle.
 *	pd          A protection domain handle.
 *	mem_pattr   Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p    The returned IBT memory region handle.
 *	mem_desc_p  Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Modifies the attributes of an existing physically addressed memory
 *	region for use by an HCA.
 */
ibt_status_t
ibt_reregister_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_phys_mr(%p, %p, %p, %p)",
            hca_hdl, mr_hdl, pd, mem_pattr);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_physical_mr(
            IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_pattr,
            NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
            mr_hdl_p, mem_desc_p);

        if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
            status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {
                IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_phys_mr: "
                    "Re-registration Mem Failed: %d", status);

                /* we lost one memory region resource */
                mutex_enter(&hca_hdl->ha_mutex);
                hca_hdl->ha_mr_cnt--;
                mutex_exit(&hca_hdl->ha_mutex);
        }
        return (status);
}


/*
 * Fast Memory Registration (FMR).
 *
 * ibt_create_fmr_pool
 *	Not fast-path.
 *	ibt_create_fmr_pool() verifies that the HCA supports FMR, then
 *	allocates and initializes an "FMR pool". This pool contains state
 *	specific to this registration, including the watermark setting
 *	that determines when to sync and the total number of FMR regions
 *	available within the pool.
 */
ibt_status_t
ibt_create_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params, ibt_fmr_pool_hdl_t *fmr_pool_p)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_create_fmr_pool(%p, %p, %p)",
            hca_hdl, pd, fmr_params);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_create_fmr_pool(
            IBTL_HCA2CIHCA(hca_hdl), pd, fmr_params, fmr_pool_p);
        if (status != IBT_SUCCESS) {
                *fmr_pool_p = NULL;
                return (status);
        }

        /* Update the FMR resource count */
        mutex_enter(&hca_hdl->ha_mutex);
        hca_hdl->ha_fmr_pool_cnt++;
        mutex_exit(&hca_hdl->ha_mutex);

        return (status);
}


/*
 * ibt_destroy_fmr_pool
 *	ibt_destroy_fmr_pool() deallocates all of the FMR regions in the
 *	specified pool. All state and information regarding the pool are
 *	destroyed and returned as free space once again. No further use of
 *	FMR regions in this pool is possible without a subsequent call to
 *	ibt_create_fmr_pool().
 */
ibt_status_t
ibt_destroy_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
        ibt_status_t status;

        IBTF_DPRINTF_L3(ibtl_mem, "ibt_destroy_fmr_pool(%p, %p)",
            hca_hdl, fmr_pool);

        status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_destroy_fmr_pool(
            IBTL_HCA2CIHCA(hca_hdl), fmr_pool);
        if (status != IBT_SUCCESS) {
                IBTF_DPRINTF_L2(ibtl_mem, "ibt_destroy_fmr_pool: "
                    "CI FMR Pool destroy failed (%d)", status);
                return (status);
        }

        mutex_enter(&hca_hdl->ha_mutex);
        hca_hdl->ha_fmr_pool_cnt--;
        mutex_exit(&hca_hdl->ha_mutex);

        return (status);
}
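
/*
 * Illustrative sketch (not part of the original TI code): a typical FMR
 * pool lifecycle around the calls above.  The ibt_fmr_pool_attr_t setup
 * is elided because its contents are driver-specific; guard macro and
 * helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static void
example_fmr_pool_lifecycle(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params)
{
        ibt_fmr_pool_hdl_t fmr_pool;

        if (ibt_create_fmr_pool(hca_hdl, pd, fmr_params, &fmr_pool) !=
            IBT_SUCCESS)
                return;

        /* ... register/deregister FMRs against the pool here ... */

        (void) ibt_destroy_fmr_pool(hca_hdl, fmr_pool);
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */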

/*
 * ibt_flush_fmr_pool
 *	ibt_flush_fmr_pool() forces a flush to occur. At the client's
 *	request, any unmapped FMR regions (see ibt_deregister_fmr()) are
 *	returned to the free state, allowing an asynchronous cleanup of
 *	formerly used FMR regions. The HCA driver also performs this sync
 *	internally when the pool's "watermark" for the number of free FMR
 *	regions is reached.
 */
ibt_status_t
ibt_flush_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_flush_fmr_pool(%p, %p)",
            hca_hdl, fmr_pool);

        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_flush_fmr_pool(
            IBTL_HCA2CIHCA(hca_hdl), fmr_pool));
}

/*
 * ibt_register_physical_fmr
 *	ibt_register_physical_fmr() assigns a "free" entry from the FMR
 *	pool. It first consults the "FMR cache" to see whether this is a
 *	duplicate memory registration of something already in use. If not,
 *	a free entry in the pool is marked used.
 */
ibt_status_t
ibt_register_physical_fmr(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_physical_fmr(%p, %p, %p, %p)",
            hca_hdl, fmr_pool, mem_pattr, mem_desc_p);

        return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_fmr(
            IBTL_HCA2CIHCA(hca_hdl), fmr_pool, mem_pattr,
            NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
            mr_hdl_p, mem_desc_p));
}

/*
 * ibt_deregister_fmr
 *	ibt_deregister_fmr() unmaps the resources reserved from the FMR
 *	pool by ibt_register_physical_fmr() and marks the region as free
 *	in the FMR pool.
 */
ibt_status_t
ibt_deregister_fmr(ibt_hca_hdl_t hca, ibt_mr_hdl_t mr_hdl)
{
        IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_fmr(%p, %p)", hca, mr_hdl);

        return (IBTL_HCA2CIHCAOPS_P(hca)->ibc_deregister_fmr(
            IBTL_HCA2CIHCA(hca), mr_hdl));
}
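
/*
 * Illustrative sketch (not part of the original TI code): map a physical
 * buffer through the FMR pool, then release it and request an eager
 * flush.  Guard macro and helper name are hypothetical.
 */
#ifdef IBTL_MEM_USAGE_EXAMPLES
static ibt_status_t
example_fmr_roundtrip(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool,
    ibt_pmr_attr_t *pmr_attr)
{
        ibt_mr_hdl_t	mr_hdl;
        ibt_pmr_desc_t	pmr_desc;
        ibt_status_t	status;

        status = ibt_register_physical_fmr(hca_hdl, fmr_pool, pmr_attr,
            &mr_hdl, &pmr_desc);
        if (status != IBT_SUCCESS)
                return (status);

        /* ... use the keys in pmr_desc for RDMA work requests ... */

        (void) ibt_deregister_fmr(hca_hdl, mr_hdl);
        return (ibt_flush_fmr_pool(hca_hdl, fmr_pool));
}
#endif	/* IBTL_MEM_USAGE_EXAMPLES */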