/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/types.h>
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <asm/atomic.h>

#include <common/t4_msg.h>
#include "iw_cxgbe.h"

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024

static int
mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{

	return (is_t5(dev->rdev.adap) && length >= 8*1024*1024*1024ULL);
}
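
/*
 * Write a chunk of adapter memory with a single ULP_TX work request
 * carrying a DSGL: the hardware pulls the payload from the bus address
 * in 'data', which the caller must already have DMA-mapped.  'addr' is
 * in 32-byte units and 'len' must be a multiple of 32; a completion is
 * requested and awaited only when 'wait' is set.
 */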
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data, int wait)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	addr &= 0x7FFFFFF;

	if (wait)
		c4iw_init_wr_wait(&wr_wait);
	wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);

	wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
	if (wr == NULL)
		return -ENOMEM;
	ulpmc = wrtod(wr);

	memset(ulpmc, 0, wr_len);
	INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
	ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
	    (wait ? F_FW_WR_COMPL : 0));
	ulpmc->wr.wr_lo = wait ? (__force __be64)(unsigned long)&wr_wait : 0;
	ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
	ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
	    V_T5_ULP_MEMIO_ORDER(1) |
	    V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
	ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len >> 5));
	ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
	ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));

	sgl = (struct ulptx_sgl *)(ulpmc + 1);
	sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64((u64)data);

	t4_wrq_tx(sc, wr);

	if (wait)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}
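
/*
 * Write adapter memory as immediate data, C4IW_MAX_INLINE_SIZE bytes
 * per work request.  Each chunk is padded out to a multiple of
 * T4_ULPTX_MIN_IO; a NULL 'data' zeroes the region instead of copying.
 * Only the last WR requests a completion, which is then waited on.
 */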
static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct adapter *sc = rdev->adap;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;
	u32 cmd;

	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));

	cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);

	addr &= 0x7FFFFFF;
	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(&wr_wait);
	for (i = 0; i < num_wqe; i++) {

		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
		    roundup(copy_len, T4_ULPTX_MIN_IO), 16);

		wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
		if (wr == NULL)
			return -ENOMEM;
		ulpmc = wrtod(wr);

		memset(ulpmc, 0, wr_len);
		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
			    F_FW_WR_COMPL);
			ulpmc->wr.wr_lo =
			    (__force __be64)(unsigned long)&wr_wait;
		} else
			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
		ulpmc->wr.wr_mid = cpu_to_be32(
		    V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));

		ulpmc->cmd = cmd;
		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr),
		    16));
		/* addr is in 32B units; each WR covers 96B = 3 units. */
		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(ulpsc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			    (copy_len % T4_ULPTX_MIN_IO));
		t4_wrq_tx(sc, wr);
		len -= C4IW_MAX_INLINE_SIZE;
	}

	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
	return ret;
}
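
/*
 * DMA the bulk of a write in chunks of up to T4_ULPTX_MAX_DMA bytes,
 * waiting for a completion only on the final chunk; whatever is left
 * at or below inline_threshold is written through the inline path.
 * The source buffer is DMA-mapped once for the whole transfer.
 */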
static int
_c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
	struct c4iw_dev *rhp = rdev_to_c4iw_dev(rdev);
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(rhp->ibdev.dma_device, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(rhp->ibdev.dma_device, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
		    (void *)daddr, !remain);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data = (u8 *)data + dmalen;
		daddr = daddr + dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
out:
	dma_unmap_single(rhp->ibdev.dma_device, save, len, DMA_TO_DEVICE);
	return ret;
}

/*
 * write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len bytes of memory to zero.
 */
static int
write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
    void *data)
{
	if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl) {
		if (len > inline_threshold) {
			if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
				log(LOG_ERR, "%s: dma map "
				    "failure (non fatal)\n", __func__);
				return _c4iw_write_mem_inline(rdev, addr, len,
				    data);
			} else
				return 0;
		} else
			return _c4iw_write_mem_inline(rdev, addr, len, data);
	} else
		return _c4iw_write_mem_inline(rdev, addr, len, data);
}
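
/*
 * STag layout: the TPT table index occupies the upper 24 bits, and an
 * 8-bit key, bumped on every allocation so stale remote handles are
 * rejected, fills the low byte.  The driver's memory-handle id (mmid)
 * is therefore stag >> 8.
 */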
/*
 * Build and write a TPT entry.
 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
 *     pbl_size and pbl_addr
 * OUT: stag index
 */
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	CTR5(KTR_IW_CXGBE,
	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
	    __func__, stag_state, type, pdid, stag_idx);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
		    V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
		    V_FW_RI_TPTE_STAGSTATE(stag_state) |
		    V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
		    (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
		    V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
			FW_RI_VA_BASED_TO)) |
		    V_FW_RI_TPTE_PS(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
		    V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
	    (rdev->adap->vres.stag.start >> 5),
	    sizeof(tpt), &tpt);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size)
{
	int err;

	CTR4(KTR_IW_CXGBE, "%s *pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d",
	    __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
	return err;
}

static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
	    pbl_size, pbl_addr);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
	    0UL, 0, 0, 0, 0);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
	    0);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
	    0UL, 0, 0, pbl_size, pbl_addr);
}

static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
	    FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
	    mhp->attr.mw_bind_enable, mhp->attr.zbva,
	    mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		    mhp->attr.pbl_addr);
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
	    npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}
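
/*
 * A DMA MR is a single TPT entry with no PBL that spans the whole
 * address space (va_fbo 0, length ~0ULL), carrying the TPT access
 * rights translated from 'acc'.
 */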
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
	    FW_RI_STAG_NSMR, mhp->attr.perms,
	    mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
	if (ret)
		goto err1;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err2;
	return &mhp->ibmr;
err2:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err1:
	kfree(mhp);
	return ERR_PTR(ret);
}

struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
    u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	/*
	 * Fill the PBL with the DMA address of every page in the umem,
	 * flushing it to adapter memory one scratch page at a time.
	 */
	i = n = 0;
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
			    mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
				    pages,
				    mhp->attr.pbl_addr + (n << 3), i);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
		    mhp->attr.pbl_addr + (n << 3), i);

pbl_done:
	free_page((unsigned long)pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
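
/*
 * Memory windows: only type 1 is supported.  allocate_window() reserves
 * a TPT entry of type FW_RI_STAG_MW now; the window is attached to a
 * memory region later, when the consumer posts a bind.
 */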
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
	    stag);
	return &(mhp->ibmw);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag);
	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
	    mhp);
	kfree(mhp);
	return 0;
}

struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
			    enum ib_mr_type mr_type,
			    u32 max_num_sg)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(
		rhp->rdev.adap->params.ulptx_memwrite_dsgl && use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->mpl = dma_alloc_coherent(rhp->ibdev.dma_device,
	    length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_mpl;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
	    mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
err2:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
	    mhp->attr.pbl_size << 3);
err1:
	dma_free_coherent(rhp->ibdev.dma_device,
	    mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_mpl:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
    int sg_nents, unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

int c4iw_dereg_mr(struct ib_mr *ib_mr)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	remove_handle(rhp, &rhp->mmidr, mmid);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
	    mhp->attr.pbl_addr);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
		    mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *)(unsigned long)mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	spin_lock_irqsave(&rhp->lock, flags);
	mhp = get_mhp(rhp, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	spin_unlock_irqrestore(&rhp->lock, flags);
}
#endif