/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/ib/ibtl/impl/ibtl.h>

/*
 * ibtl_mem.c
 *	These routines implement all of the Memory Region verbs and the
 *	alloc/query/free Memory Window verbs at the TI interface.
 */

static char ibtl_mem[] = "ibtl_mem";

/*
 * Function:
 *	ibt_register_mr()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	pd       - Protection Domain Handle.
 *	mem_attr - Requested memory region attributes.
 * Output:
 *	mr_hdl_p - The returned IBT memory region handle.
 *	mem_desc - Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Prepares a virtually addressed memory region for use by an HCA.  A
 *	description of the registered memory, suitable for use in Work
 *	Requests (WRs), is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mr_attr_t *mem_attr,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ib_vaddr_t	vaddr;
	ibt_status_t	status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_mr(%p, %p, %p)",
	    hca_hdl, pd, mem_attr);

	vaddr = mem_attr->mr_vaddr;

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_mr(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_attr, IBTL_HCA2CLNT(hca_hdl),
	    mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		mem_desc->md_vaddr = vaddr;
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}
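
/*
 * Usage sketch (illustrative only, not part of this file): how a kernel
 * client might call ibt_register_mr().  The names my_hca, my_pd, my_buf,
 * and MY_BUF_LEN are hypothetical, the flag choice is just an example,
 * and mr_as == NULL is assumed to denote the kernel address space.
 *
 *	ibt_mr_attr_t	mr_attr;
 *	ibt_mr_hdl_t	mr_hdl;
 *	ibt_mr_desc_t	mr_desc;
 *	ibt_status_t	status;
 *
 *	bzero(&mr_attr, sizeof (ibt_mr_attr_t));
 *	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)my_buf;
 *	mr_attr.mr_len = MY_BUF_LEN;
 *	mr_attr.mr_as = NULL;
 *	mr_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *
 *	status = ibt_register_mr(my_hca, my_pd, &mr_attr, &mr_hdl,
 *	    &mr_desc);
 *
 * On IBT_SUCCESS, mr_desc.md_lkey and mr_desc.md_rkey are usable in
 * subsequent Work Requests, and md_vaddr holds the registered virtual
 * address.
 */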

/*
 * Function:
 *	ibt_register_buf()
 * Input:
 *	hca_hdl		HCA Handle.
 *	pd		Protection Domain Handle.
 *	mem_bpattr	Memory Registration attributes (IOVA and flags).
 *	bp		A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc	Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 * Description:
 *	Prepares a memory region described by a buf(9S) struct for use by
 *	an HCA.  A description of the registered memory, suitable for use
 *	in Work Requests (WRs), is returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_register_buf(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_smr_attr_t *mem_bpattr, struct buf *bp, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_buf(%p, %p, %p, %p)",
	    hca_hdl, pd, mem_bpattr, bp);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_buf(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_bpattr, bp, IBTL_HCA2CLNT(hca_hdl),
	    mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}


/*
 * Function:
 *	ibt_query_mr()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mr_hdl  - The IBT Memory Region handle.
 * Output:
 *	attr    - A pointer to the memory region attributes structure.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory region.
 */
ibt_status_t
ibt_query_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_mr_query_attr_t *attr)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mr(%p, %p)", hca_hdl, mr_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, attr));
}


/*
 * Function:
 *	ibt_deregister_mr()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mr_hdl  - The IBT Memory Region handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_IN_USE
 * Description:
 *	De-registers the memory region: removes it from the HCA translation
 *	table and frees all resources associated with the memory region.
 */
ibt_status_t
ibt_deregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_mr(%p, %p)", hca_hdl, mr_hdl);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_deregister_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}
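
/*
 * Teardown sketch (illustrative only): every successful registration is
 * expected to be paired with a deregistration.  IBT_MR_IN_USE indicates
 * the region still has dependent resources (e.g. a bound memory window);
 * that interpretation, and the names my_hca and mr_hdl, are assumptions.
 *
 *	ibt_status_t	status;
 *
 *	status = ibt_deregister_mr(my_hca, mr_hdl);
 *	if (status == IBT_MR_IN_USE) {
 *		... release the dependent resources, then retry ...
 *	}
 */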

/*
 * Function:
 *	ibt_reregister_mr()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	mr_hdl   - The IBT Memory Region handle.
 *	pd       - Optional Protection Domain Handle.
 *	mem_attr - Requested memory region attributes.
 * Output:
 *	mr_hdl_p - The reregistered IBT memory region handle.
 *	mem_desc - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 *	IBT_MR_IN_USE
 * Description:
 *	Modifies the attributes of an existing memory region.
 */
ibt_status_t
ibt_reregister_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl, ibt_pd_hdl_t pd,
    ibt_mr_attr_t *mem_attr, ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ibt_status_t	status;
	ib_vaddr_t	vaddr = mem_attr->mr_vaddr;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_attr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_attr,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

	if (status == IBT_SUCCESS)
		mem_desc->md_vaddr = vaddr;
	else if (!(status == IBT_MR_IN_USE || status == IBT_HCA_HDL_INVALID ||
	    status == IBT_MR_HDL_INVALID)) {

		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_mr: "
		    "Re-registration Failed: %d", status);

		/* we lost one memory region resource */
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}


/*
 * Function:
 *	ibt_reregister_buf()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mr_hdl		The IBT Memory Region handle.
 *	pd		Optional Protection Domain Handle.
 *	mem_bpattr	Memory Registration attributes (IOVA and flags).
 *	bp		A pointer to a buf(9S) struct.
 * Output:
 *	mr_hdl_p	The reregistered IBT memory region handle.
 *	mem_desc	Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_INSUFF_RESOURCE
 *	IBT_MR_IN_USE
 * Description:
 *	Modifies the attributes of an existing memory region, as described
 *	by a buf(9S) struct, for use by an HCA.  A description of the
 *	registered memory, suitable for use in Work Requests (WRs), is
 *	returned in the ibt_mr_desc_t parameter.
 */
ibt_status_t
ibt_reregister_buf(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_bpattr, struct buf *bp,
    ibt_mr_hdl_t *mr_hdl_p, ibt_mr_desc_t *mem_desc)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_buf(%p, %p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_bpattr, bp);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_buf(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_bpattr, bp,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);

	if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
	    status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {

		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_buf: "
		    "Re-registration Mem Failed: %d", status);

		/* we lost one memory region resource */
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}
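
/*
 * Error-handling sketch (illustrative only): as the code above shows, a
 * failed reregistration can consume the original region.  Unless the
 * failure was IBT_MR_IN_USE or an invalid handle, the old mr_hdl must not
 * be reused.  All client-side names are hypothetical.
 *
 *	status = ibt_reregister_mr(my_hca, mr_hdl, my_pd, &new_attr,
 *	    &new_mr_hdl, &new_desc);
 *	if (status == IBT_SUCCESS) {
 *		mr_hdl = new_mr_hdl;	(use the new handle from now on)
 *	} else if (status != IBT_MR_IN_USE &&
 *	    status != IBT_HCA_HDL_INVALID && status != IBT_MR_HDL_INVALID) {
 *		... the old region is gone; re-register from scratch ...
 *	}
 */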

/*
 * Function:
 *	ibt_register_shared_mr()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mr_hdl    - The IBT Memory Region handle.
 *	pd        - Protection Domain Handle.
 *	mem_sattr - Requested memory region shared attributes.
 * Output:
 *	mr_hdl_p  - The returned IBT memory region handle.
 *	mem_desc  - Returned memory descriptor for the new memory region.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_PD_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 * Description:
 *	Given an existing memory region, creates a new memory region
 *	associated with the same physical locations.
 */
ibt_status_t
ibt_register_shared_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_smr_attr_t *mem_sattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_mr_desc_t *mem_desc)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_shared_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_sattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_shared_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_sattr,
	    IBTL_HCA2CLNT(hca_hdl), mr_hdl_p, mem_desc);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}

/*
 * Function:
 *	ibt_sync_mr()
 * Input:
 *	hca_hdl      - HCA Handle.
 *	mr_segments  - A pointer to an array of ibt_mr_sync_t that describes
 *		       the memory regions to sync.
 *	num_segments - The length of the mr_segments array.
 * Output:
 *	NONE
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_HDL_INVALID
 *	IBT_INVALID_PARAM
 *	IBT_MR_VA_INVALID
 *	IBT_MR_LEN_INVALID
 * Description:
 *	Makes memory changes visible to incoming RDMA reads, or makes the
 *	effects of incoming RDMA writes visible to the consumer.
 */
ibt_status_t
ibt_sync_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_sync_t *mr_segments,
    size_t num_segments)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_sync_mr(%p, %p, %d)", hca_hdl,
	    mr_segments, num_segments);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_sync_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_segments, num_segments));
}
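
/*
 * Usage sketch (illustrative only): making local writes visible to an
 * incoming RDMA read.  The ms_handle/ms_vaddr/ms_len/ms_flags layout of
 * ibt_mr_sync_t is assumed from ibtl_types.h; my_hca, mr_hdl, my_buf,
 * and MY_BUF_LEN are hypothetical.
 *
 *	ibt_mr_sync_t	ms;
 *	ibt_status_t	status;
 *
 *	ms.ms_handle = mr_hdl;
 *	ms.ms_vaddr = (ib_vaddr_t)(uintptr_t)my_buf;
 *	ms.ms_len = MY_BUF_LEN;
 *	ms.ms_flags = IBT_SYNC_READ;
 *
 *	status = ibt_sync_mr(my_hca, &ms, 1);
 *
 * IBT_SYNC_WRITE would instead make the effects of incoming RDMA writes
 * visible to the consumer, per the description above.
 */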

/*
 * Function:
 *	ibt_alloc_mw()
 * Input:
 *	hca_hdl  - HCA Handle.
 *	pd       - Protection Domain Handle.
 *	flags    - Memory Window alloc flags.
 * Output:
 *	mw_hdl_p - The returned IBT Memory Window handle.
 *	rkey     - The IBT R_Key handle.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_PD_HDL_INVALID
 * Description:
 *	Allocates a memory window from the HCA.
 */
ibt_status_t
ibt_alloc_mw(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_mw_flags_t flags,
    ibt_mw_hdl_t *mw_hdl_p, ibt_rkey_t *rkey)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_mw(%p, %p, 0x%x)",
	    hca_hdl, pd, flags);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_mw(
	    IBTL_HCA2CIHCA(hca_hdl), pd, flags, mw_hdl_p, rkey);

	/*
	 * XXX - We should be able to allocate state and have an IBTF Memory
	 * Window Handle.  Memory Windows are meant to be rebound on the fly
	 * (using a post) to make them fast.  Memory window allocation is
	 * expected to be done in a relatively static manner.  But we don't
	 * have a good reason to keep local MW state at this point, so we
	 * won't.
	 */
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mw_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}


/*
 * Function:
 *	ibt_query_mw()
 * Input:
 *	hca_hdl   - HCA Handle.
 *	mw_hdl    - The IBT Memory Window handle.
 * Output:
 *	mw_attr_p - Memory Window attributes (the associated Protection
 *		    Domain handle and R_Key).
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MW_HDL_INVALID
 * Description:
 *	Retrieves information about a specified memory window.
 */
ibt_status_t
ibt_query_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl,
    ibt_mw_query_attr_t *mw_attr_p)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_query_mw(%p, %p)", hca_hdl, mw_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_query_mw(
	    IBTL_HCA2CIHCA(hca_hdl), mw_hdl, mw_attr_p));
}


/*
 * Function:
 *	ibt_free_mw()
 * Input:
 *	hca_hdl - HCA Handle.
 *	mw_hdl  - The IBT Memory Window handle.
 * Output:
 *	none.
 * Returns:
 *	IBT_SUCCESS
 *	IBT_HCA_HDL_INVALID
 *	IBT_MW_HDL_INVALID
 * Description:
 *	De-allocates the Memory Window.
 */
ibt_status_t
ibt_free_mw(ibt_hca_hdl_t hca_hdl, ibt_mw_hdl_t mw_hdl)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_free_mw(%p, %p)", hca_hdl, mw_hdl);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_mw(
	    IBTL_HCA2CIHCA(hca_hdl), mw_hdl);

	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mw_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}
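
/*
 * Usage sketch (illustrative only): allocating, querying, and freeing a
 * memory window.  Binding the window to a registered region (which makes
 * the R_Key usable) is done by posting a bind operation to a QP and is
 * not shown.  IBT_MW_SLEEP is assumed to be a valid ibt_mw_flags_t value;
 * my_hca, my_pd, mw_hdl, rkey, and mw_attr are hypothetical.
 *
 *	ibt_mw_hdl_t		mw_hdl;
 *	ibt_rkey_t		rkey;
 *	ibt_mw_query_attr_t	mw_attr;
 *	ibt_status_t		status;
 *
 *	status = ibt_alloc_mw(my_hca, my_pd, IBT_MW_SLEEP, &mw_hdl, &rkey);
 *	...
 *	status = ibt_query_mw(my_hca, mw_hdl, &mw_attr);
 *	...
 *	(void) ibt_free_mw(my_hca, mw_hdl);
 */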

/*
 * Function:
 *	ibt_map_mem_area()
 * Input:
 *	hca_hdl		HCA Handle.
 *	va_attrs	A pointer to an ibt_va_attr_t that describes the
 *			VA to be translated.
 *	paddr_list_len	The number of physical buffer entries requested.
 * Output:
 *	reg_req		The returned registration request, suitable for use
 *			with "Reserved L_Key", register physical, and
 *			"Fast Registration Work Request" operations.
 *	ma_hdl_p	Memory Area Handle.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Translates a kernel virtual address range into HCA physical
 *	addresses.
 */
ibt_status_t
ibt_map_mem_area(ibt_hca_hdl_t hca_hdl, ibt_va_attr_t *va_attrs,
    uint_t paddr_list_len, ibt_reg_req_t *reg_req, ibt_ma_hdl_t *ma_hdl_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_area(%p, %p, %d)",
	    hca_hdl, va_attrs, paddr_list_len);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_area(
	    IBTL_HCA2CIHCA(hca_hdl), va_attrs,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    paddr_list_len, reg_req, ma_hdl_p);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_ma_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}


/*
 * Function:
 *	ibt_unmap_mem_area()
 * Input:
 *	hca_hdl		HCA Handle.
 *	ma_hdl		Memory Area Handle.
 * Output:
 *	None.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Unpins the physical pages that were pinned by an
 *	ibt_map_mem_area() call.
 */
ibt_status_t
ibt_unmap_mem_area(ibt_hca_hdl_t hca_hdl, ibt_ma_hdl_t ma_hdl)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_area(%p, %p)",
	    hca_hdl, ma_hdl);

	status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_area(
	    IBTL_HCA2CIHCA(hca_hdl), ma_hdl));
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_ma_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}

/*
 * Function:
 *	ibt_map_mem_iov()
 * Input:
 *	hca_hdl		HCA Handle.
 *	iov_attr	A pointer to an ibt_iov_attr_t that describes the
 *			virtual ranges to be translated.
 * Output:
 *	wr		A pointer to the work request where the output
 *			sgl (reserved_lkey, size, paddr) will be written.
 *	mi_hdl_p	Memory IOV Handle.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Translates an array of virtual address ranges into HCA physical
 *	addresses, sizes, and reserved_lkey.
 */
ibt_status_t
ibt_map_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_iov_attr_t *iov_attr,
    ibt_all_wr_t *wr, ibt_mi_hdl_t *mi_hdl_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_map_mem_iov(%p, %p, %p)",
	    hca_hdl, iov_attr, wr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_map_mem_iov(
	    IBTL_HCA2CIHCA(hca_hdl), iov_attr, wr, mi_hdl_p);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_ma_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}


/*
 * Function:
 *	ibt_unmap_mem_iov()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mi_hdl		Memory IOV Handle.
 * Output:
 *	None.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Unpins the physical pages that were pinned by an
 *	ibt_map_mem_iov() call.
 */
ibt_status_t
ibt_unmap_mem_iov(ibt_hca_hdl_t hca_hdl, ibt_mi_hdl_t mi_hdl)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_unmap_mem_iov(%p, %p)",
	    hca_hdl, mi_hdl);

	status = (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_unmap_mem_iov(
	    IBTL_HCA2CIHCA(hca_hdl), mi_hdl));
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_ma_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}
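
/*
 * Pairing sketch (illustrative only): every successful ibt_map_mem_area()
 * or ibt_map_mem_iov() call pins physical pages and must eventually be
 * matched by the corresponding unmap call, which unpins them and, as the
 * code above shows, decrements ha_ma_cnt.  The ibt_va_attr_t setup is
 * elided; my_hca, my_va_attrs, and MY_PADDR_LIST_LEN are hypothetical.
 *
 *	ibt_reg_req_t	reg_req;
 *	ibt_ma_hdl_t	ma_hdl;
 *	ibt_status_t	status;
 *
 *	status = ibt_map_mem_area(my_hca, &my_va_attrs, MY_PADDR_LIST_LEN,
 *	    &reg_req, &ma_hdl);
 *	if (status == IBT_SUCCESS) {
 *		... use reg_req in a registration operation ...
 *		(void) ibt_unmap_mem_area(my_hca, ma_hdl);
 *	}
 */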

/*
 * Function:
 *	ibt_alloc_io_mem()
 * Input:
 *	hca_hdl		HCA Handle.
 *	size		Number of bytes to allocate.
 *	mr_flag		Possible values: IBT_MR_SLEEP, IBT_MR_NONCOHERENT.
 * Output:
 *	kaddrp		The virtual address of the memory allocated by this
 *			call (set to NULL if the allocation fails).
 *	mem_alloc_hdl	Memory access handle returned by ibt_alloc_io_mem().
 * Returns:
 *	IBT_SUCCESS
 *	IBT_INSUFF_RESOURCE
 *	IBT_HCA_HDL_INVALID
 *	IBT_MR_ACCESS_REQ_INVALID
 *	IBT_INVALID_PARAM
 * Description:
 *	Wrapper for ddi_dma_mem_alloc().
 */
ibt_status_t
ibt_alloc_io_mem(ibt_hca_hdl_t hca_hdl, size_t size, ibt_mr_flags_t mr_flag,
    caddr_t *kaddrp, ibt_mem_alloc_hdl_t *mem_alloc_hdl)
{
	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_io_mem(
	    IBTL_HCA2CIHCA(hca_hdl), size, mr_flag, kaddrp,
	    (ibc_mem_alloc_hdl_t *)mem_alloc_hdl));
}

/*
 * Function:
 *	ibt_free_io_mem()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mem_alloc_hdl	Memory access handle returned by ibt_alloc_io_mem().
 * Output:
 *	None.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Wrapper for ddi_dma_mem_free().
 */
ibt_status_t
ibt_free_io_mem(ibt_hca_hdl_t hca_hdl, ibt_mem_alloc_hdl_t mem_alloc_hdl)
{
	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_free_io_mem(
	    IBTL_HCA2CIHCA(hca_hdl), (ibc_mem_alloc_hdl_t)mem_alloc_hdl));
}
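
/*
 * Usage sketch (illustrative only): allocating DMA-able memory through the
 * HCA and freeing it again.  The names my_hca, MY_SIZE, kaddr, and mem_hdl
 * are hypothetical; IBT_MR_SLEEP is one of the documented mr_flag values.
 *
 *	caddr_t			kaddr;
 *	ibt_mem_alloc_hdl_t	mem_hdl;
 *	ibt_status_t		status;
 *
 *	status = ibt_alloc_io_mem(my_hca, MY_SIZE, IBT_MR_SLEEP, &kaddr,
 *	    &mem_hdl);
 *	if (status == IBT_SUCCESS) {
 *		... use the buffer at kaddr ...
 *		(void) ibt_free_io_mem(my_hca, mem_hdl);
 *	}
 */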

/*
 * Function:
 *	ibt_alloc_lkey()
 * Input:
 *	hca_hdl			HCA Handle.
 *	pd			A protection domain handle.
 *	flags			Access control.
 *	phys_buf_list_sz	Requested size of Physical Buffer List (PBL)
 *				resources to be allocated.
 * Output:
 *	mr_hdl_p		The returned IBT memory region handle.
 *	mem_desc_p		Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Allocates physical buffer list resources for use in memory
 *	registrations.
 */
ibt_status_t
ibt_alloc_lkey(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd, ibt_lkey_flags_t flags,
    uint_t phys_buf_list_sz, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_alloc_lkey(%p, %p, 0x%X, %d)",
	    hca_hdl, pd, flags, phys_buf_list_sz);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_alloc_lkey(
	    IBTL_HCA2CIHCA(hca_hdl), pd, flags, phys_buf_list_sz, mr_hdl_p,
	    mem_desc_p);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}


/*
 * Function:
 *	ibt_register_phys_mr()
 * Input:
 *	hca_hdl		HCA Handle.
 *	pd		A protection domain handle.
 *	mem_pattr	Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc_p	Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Prepares a physically addressed memory region for use by an HCA.
 */
ibt_status_t
ibt_register_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_phys_mr(%p, %p, %p)",
	    hca_hdl, pd, mem_pattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_mr(
	    IBTL_HCA2CIHCA(hca_hdl), pd, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p);
	if (status == IBT_SUCCESS) {
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt++;
		mutex_exit(&hca_hdl->ha_mutex);
	}

	return (status);
}


/*
 * Function:
 *	ibt_reregister_phys_mr()
 * Input:
 *	hca_hdl		HCA Handle.
 *	mr_hdl		The IBT memory region handle.
 *	pd		A protection domain handle.
 *	mem_pattr	Requested memory region physical attributes.
 * Output:
 *	mr_hdl_p	The returned IBT memory region handle.
 *	mem_desc_p	Returned memory descriptor.
 * Returns:
 *	IBT_SUCCESS
 * Description:
 *	Modifies the attributes of an existing physically addressed memory
 *	region for use by an HCA.
 */
ibt_status_t
ibt_reregister_phys_mr(ibt_hca_hdl_t hca_hdl, ibt_mr_hdl_t mr_hdl,
    ibt_pd_hdl_t pd, ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_reregister_phys_mr(%p, %p, %p, %p)",
	    hca_hdl, mr_hdl, pd, mem_pattr);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_reregister_physical_mr(
	    IBTL_HCA2CIHCA(hca_hdl), mr_hdl, pd, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p);

	if (!(status == IBT_SUCCESS || status == IBT_MR_IN_USE ||
	    status == IBT_HCA_HDL_INVALID || status == IBT_MR_HDL_INVALID)) {
		IBTF_DPRINTF_L2(ibtl_mem, "ibt_reregister_phys_mr: "
		    "Re-registration Mem Failed: %d", status);

		/* we lost one memory region resource */
		mutex_enter(&hca_hdl->ha_mutex);
		hca_hdl->ha_mr_cnt--;
		mutex_exit(&hca_hdl->ha_mutex);
	}
	return (status);
}


/*
 * Fast Memory Registration (FMR).
 *
 * ibt_create_fmr_pool
 *	Not fast-path.
 *	ibt_create_fmr_pool() verifies that the HCA supports FMR and
 *	allocates and initializes an "FMR pool".  This pool contains state
 *	specific to this registration, including the watermark setting that
 *	determines when to sync and the total number of FMR regions
 *	available within this pool.
 */
ibt_status_t
ibt_create_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_pd_hdl_t pd,
    ibt_fmr_pool_attr_t *fmr_params, ibt_fmr_pool_hdl_t *fmr_pool_p)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_create_fmr_pool(%p, %p, %p)",
	    hca_hdl, pd, fmr_params);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_create_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), pd, fmr_params, fmr_pool_p);
	if (status != IBT_SUCCESS) {
		*fmr_pool_p = NULL;
		return (status);
	}

	/* Update the FMR resource count */
	mutex_enter(&hca_hdl->ha_mutex);
	hca_hdl->ha_fmr_pool_cnt++;
	mutex_exit(&hca_hdl->ha_mutex);

	return (status);
}


/*
 * ibt_destroy_fmr_pool
 *	ibt_destroy_fmr_pool() deallocates all of the FMR regions in a
 *	specific pool.  All state and information regarding the pool are
 *	destroyed and returned as free space once again.  No more use of
 *	FMR regions in this pool is possible without a subsequent call to
 *	ibt_create_fmr_pool().
 */
ibt_status_t
ibt_destroy_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
	ibt_status_t status;

	IBTF_DPRINTF_L3(ibtl_mem, "ibt_destroy_fmr_pool(%p, %p)",
	    hca_hdl, fmr_pool);

	status = IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_destroy_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool);
	if (status != IBT_SUCCESS) {
		IBTF_DPRINTF_L2(ibtl_mem, "ibt_destroy_fmr_pool: "
		    "CI FMR Pool destroy failed (%d)", status);
		return (status);
	}

	mutex_enter(&hca_hdl->ha_mutex);
	hca_hdl->ha_fmr_pool_cnt--;
	mutex_exit(&hca_hdl->ha_mutex);

	return (status);
}
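
/*
 * Lifecycle sketch (illustrative only): creating and destroying an FMR
 * pool.  The ibt_fmr_pool_attr_t field names (pool size, page size, dirty
 * watermark, etc.) live in ibtl_types.h and are elided here; my_hca,
 * my_pd, and fmr_pool are hypothetical.
 *
 *	ibt_fmr_pool_attr_t	fmr_attr;
 *	ibt_fmr_pool_hdl_t	fmr_pool;
 *	ibt_status_t		status;
 *
 *	bzero(&fmr_attr, sizeof (ibt_fmr_pool_attr_t));
 *	... fill in pool size, page size, dirty watermark, flags ...
 *
 *	status = ibt_create_fmr_pool(my_hca, my_pd, &fmr_attr, &fmr_pool);
 *	...
 *	(void) ibt_destroy_fmr_pool(my_hca, fmr_pool);
 */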

/*
 * ibt_flush_fmr_pool
 *	ibt_flush_fmr_pool() forces a flush to occur.  At the client's
 *	request, any unmapped FMR regions (see ibt_deregister_fmr()) are
 *	returned to a free state.  This function allows for an asynchronous
 *	cleanup of formerly used FMR regions.  This sync operation is also
 *	performed internally by the HCA driver when the "watermark" setting
 *	for the number of free FMR regions left in the pool is reached.
 */
ibt_status_t
ibt_flush_fmr_pool(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_flush_fmr_pool(%p, %p)",
	    hca_hdl, fmr_pool);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_flush_fmr_pool(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool));
}

/*
 * ibt_register_physical_fmr
 *	ibt_register_physical_fmr() assigns a "free" entry from the FMR
 *	pool.  It first consults the "FMR cache" to see if this is a
 *	duplicate memory registration to something already in use.  If not,
 *	a free entry in the pool is marked used.
 */
ibt_status_t
ibt_register_physical_fmr(ibt_hca_hdl_t hca_hdl, ibt_fmr_pool_hdl_t fmr_pool,
    ibt_pmr_attr_t *mem_pattr, ibt_mr_hdl_t *mr_hdl_p,
    ibt_pmr_desc_t *mem_desc_p)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_register_physical_fmr(%p, %p, %p, %p)",
	    hca_hdl, fmr_pool, mem_pattr, mem_desc_p);

	return (IBTL_HCA2CIHCAOPS_P(hca_hdl)->ibc_register_physical_fmr(
	    IBTL_HCA2CIHCA(hca_hdl), fmr_pool, mem_pattr,
	    NULL,	/* IBTL_HCA2MODI_P(hca_hdl)->mi_reserved */
	    mr_hdl_p, mem_desc_p));
}

/*
 * ibt_deregister_fmr
 *	ibt_deregister_fmr() unmaps the resources reserved from the FMR
 *	pool by ibt_register_physical_fmr() and marks the region as free
 *	in the FMR pool.
 */
ibt_status_t
ibt_deregister_fmr(ibt_hca_hdl_t hca, ibt_mr_hdl_t mr_hdl)
{
	IBTF_DPRINTF_L3(ibtl_mem, "ibt_deregister_fmr(%p, %p)", hca, mr_hdl);

	return (IBTL_HCA2CIHCAOPS_P(hca)->ibc_deregister_fmr(
	    IBTL_HCA2CIHCA(hca), mr_hdl));
}
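
/*
 * Fast-path sketch (illustrative only): mapping and unmapping one FMR
 * region from a pool.  The ibt_pmr_attr_t setup (IOVA, physical buffer
 * list, etc.) is elided; my_hca, fmr_pool, and my_pmr_attr are
 * hypothetical.
 *
 *	ibt_mr_hdl_t	fmr_hdl;
 *	ibt_pmr_desc_t	pmr_desc;
 *	ibt_status_t	status;
 *
 *	status = ibt_register_physical_fmr(my_hca, fmr_pool, &my_pmr_attr,
 *	    &fmr_hdl, &pmr_desc);
 *	if (status == IBT_SUCCESS) {
 *		... post WRs using the keys in pmr_desc ...
 *		(void) ibt_deregister_fmr(my_hca, fmr_hdl);
 *	}
 *
 * ibt_flush_fmr_pool(my_hca, fmr_pool) may then be called to force
 * unmapped regions back to the free state immediately.
 */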