/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

int use_dsgl = 1;
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96

static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
}

static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));

	cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
		    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
		if (wr == NULL)
			return (0);
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe-1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
		    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len -
		    sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

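		/*
		 * Each work request carries a single ULP_TX_SC_IMM
		 * sub-command whose immediate data is the next chunk of
		 * the payload, padded below to the 32-byte ULPTX minimum
		 * I/O size.
		 */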
		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			    (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}

/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
			FW_RI_VA_BASED_TO)) |
		    V_FW_RI_TPTE_PS(page_size));
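		/*
		 * The PBL address is programmed as an offset into the
		 * adapter's PBL region in units of 8-byte entries; the
		 * length and VA/FBO are split into 32-bit halves.
		 */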
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5), sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

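	/*
	 * A zero-length registration is written with no access rights
	 * and a length of -1 (all ones); otherwise the caller-supplied
	 * permissions and length are used unchanged.
	 */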
	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr);
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
			    mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev, pages,
				    mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;

			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

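	/*
	 * The page list now resides in adapter memory; record the MR
	 * attributes from which register_mem() builds the TPT entry
	 * that points at it.
	 */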
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	kfree(mhp);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	return 0;
}

struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(
		rhp->rdev.adap->params.ulptx_memwrite_dsgl && use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(rhp->ibdev.dma_device,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
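	/*
	 * err_mpl/err: no adapter resources were set up on these paths,
	 * so only the host-side MR structure (if it was allocated)
	 * remains to be freed.
	 */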
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	spin_lock_irqsave(&rhp->lock, flags);
	mhp = get_mhp(rhp, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	spin_unlock_irqrestore(&rhp->lock, flags);
}
#endif