Lines matching refs:mhp (every line referencing the struct c4iw_mr pointer mhp; the c4iw_* and FW_RI_* identifiers place this in the cxgb4 iWARP driver's memory-registration code)
377 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
381 mhp->attr.state = 1;
382 mhp->attr.stag = stag;
384 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
385 mhp->ibmr.length = mhp->attr.len;
386 mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
387 pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
388 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
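finish_mem_reg() marks the MR valid, publishes the stag as both lkey and rkey, and inserts the MR into the device's xarray under its mmid. The mmid computation itself does not contain mhp and so is absent from the matches, but the later lines "mmid = mhp->attr.stag >> 8" (line 714) and "xa_load(&rhp->mrs, rkey >> 8)" (line 739) show the convention: the low 8 bits of the stag are a key byte and the remaining bits index the MR table. A minimal userspace sketch of those encodings (values illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t stag = 0x12345600;      /* example stag from the adapter */
            uint32_t mmid = stag >> 8;       /* xarray index: stag minus its 8-bit key byte */
            unsigned int attr_page_size = 0; /* attr.page_size stores (page shift - 12) */
            uint64_t page_size = 1ULL << (attr_page_size + 12);

            printf("stag 0x%x -> mmid 0x%x, page_size %llu\n",
                   (unsigned int)stag, (unsigned int)mmid,
                   (unsigned long long)page_size);
            return 0;
    }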
392 struct c4iw_mr *mhp, int shift)
397 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
398 FW_RI_STAG_NSMR, mhp->attr.len ?
399 mhp->attr.perms : 0,
400 mhp->attr.mw_bind_enable, mhp->attr.zbva,
401 mhp->attr.va_fbo, mhp->attr.len ?
402 mhp->attr.len : -1, shift - 12,
403 mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
404 mhp->wr_waitp);
408 ret = finish_mem_reg(mhp, stag);
410 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
411 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
412 mhp->dereg_skb = NULL;
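register_mem() writes the TPT entry and then calls finish_mem_reg(); if that fails, dereg_mem() tears the TPT entry back down and consumes dereg_skb, so the pointer is cleared to keep the caller's later kfree_skb() from freeing it twice. A small sketch of that consume-and-clear idiom, with free() standing in for the skb-consuming callee (names hypothetical):

    #include <stdlib.h>

    struct buf { int dummy; };              /* stands in for the sk_buff */

    static void consuming_teardown(struct buf *b) /* models dereg_mem() taking dereg_skb */
    {
            free(b);                        /* ownership passes to the callee */
    }

    int main(void)
    {
            struct buf *dereg_skb = malloc(sizeof(*dereg_skb));
            int err = 1;                    /* pretend finish_mem_reg() failed */

            if (err) {
                    consuming_teardown(dereg_skb);
                    dereg_skb = NULL;       /* caller's later free is now a safe no-op */
            }
            free(dereg_skb);                /* free(NULL) does nothing, like kfree_skb(NULL) */
            return 0;
    }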
417 static int alloc_pbl(struct c4iw_mr *mhp, int npages)
419 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
422 if (!mhp->attr.pbl_addr)
425 mhp->attr.pbl_size = npages;
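alloc_pbl() reserves space in the adapter's page-buffer-list pool. Each PBL entry is a 64-bit DMA address, which is where the "npages << 3" byte count in the allocation (and in the matching c4iw_pblpool_free() calls at lines 589-590 and 669-670) comes from:

    #include <stdio.h>

    int main(void)
    {
            int npages = 256;
            size_t pbl_bytes = (size_t)npages << 3; /* 8 bytes per 64-bit PBL entry */

            printf("%d pages -> %zu-byte PBL\n", npages, pbl_bytes);
            return 0;
    }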
434 struct c4iw_mr *mhp;
442 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
443 if (!mhp)
445 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
446 if (!mhp->wr_waitp) {
450 c4iw_init_wr_wait(mhp->wr_waitp);
452 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
453 if (!mhp->dereg_skb) {
458 mhp->rhp = rhp;
459 mhp->attr.pdid = php->pdid;
460 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
461 mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
462 mhp->attr.zbva = 0;
463 mhp->attr.va_fbo = 0;
464 mhp->attr.page_size = 0;
465 mhp->attr.len = ~0ULL;
466 mhp->attr.pbl_size = 0;
469 FW_RI_STAG_NSMR, mhp->attr.perms,
470 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
471 NULL, mhp->wr_waitp);
475 ret = finish_mem_reg(mhp, stag);
478 return &mhp->ibmr;
480 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
481 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
483 kfree_skb(mhp->dereg_skb);
485 c4iw_put_wr_wait(mhp->wr_waitp);
487 kfree(mhp);
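c4iw_get_dma_mr() registers a region spanning all of memory (zbva = 0, va_fbo = 0, len = ~0ULL), so it needs no PBL and passes pbl_size 0. The tail lines 480-487 are its error-unwind ladder; the goto labels themselves never mention mhp and are elided from the matches. A compact userspace sketch of that reverse-order unwind pattern (labels and sizes illustrative):

    #include <stdlib.h>

    int main(void)
    {
            void *mhp, *wr_waitp, *dereg_skb;
            int ret = -12;                  /* -ENOMEM */

            mhp = calloc(1, 64);            /* kzalloc(sizeof(*mhp)) */
            if (!mhp)
                    goto err_out;
            wr_waitp = malloc(32);          /* c4iw_alloc_wr_wait() */
            if (!wr_waitp)
                    goto err_free_mhp;
            dereg_skb = malloc(128);        /* alloc_skb(SGE_MAX_WR_LEN) */
            if (!dereg_skb)
                    goto err_free_wr_wait;

            /* write_tpt_entry() and finish_mem_reg() would run here; each
             * later failure jumps one rung further down the ladder. */
            free(dereg_skb);
            free(wr_waitp);
            free(mhp);
            return 0;

    err_free_wr_wait:
            free(wr_waitp);
    err_free_mhp:
            free(mhp);
    err_out:
            return ret;
    }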
501 struct c4iw_mr *mhp;
520 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
521 if (!mhp)
523 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
524 if (!mhp->wr_waitp)
527 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
528 if (!mhp->dereg_skb)
531 mhp->rhp = rhp;
533 mhp->umem = ib_umem_get(pd->device, start, length, acc);
534 if (IS_ERR(mhp->umem))
539 n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
540 err = alloc_pbl(mhp, n);
552 rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
555 err = write_pbl(&mhp->rhp->rdev, pages,
556 mhp->attr.pbl_addr + (n << 3), i,
557 mhp->wr_waitp);
566 err = write_pbl(&mhp->rhp->rdev, pages,
567 mhp->attr.pbl_addr + (n << 3), i,
568 mhp->wr_waitp);
575 mhp->attr.pdid = php->pdid;
576 mhp->attr.zbva = 0;
577 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
578 mhp->attr.va_fbo = virt;
579 mhp->attr.page_size = shift - 12;
580 mhp->attr.len = length;
582 err = register_mem(rhp, php, mhp, shift);
586 return &mhp->ibmr;
589 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
590 mhp->attr.pbl_size << 3);
592 ib_umem_release(mhp->umem);
594 kfree_skb(mhp->dereg_skb);
596 c4iw_put_wr_wait(mhp->wr_waitp);
598 kfree(mhp);
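c4iw_reg_user_mr() pins the user range with ib_umem_get(), sizes the PBL from the number of DMA blocks, then streams the block addresses to the adapter in batches: the first write_pbl() call (lines 555-557) fires whenever the on-stack pages[] buffer fills, and the second (lines 566-568) flushes the partial tail, each advancing the PBL write position by n 8-byte entries ("n << 3"). A self-contained sketch of that batch-and-flush loop, with printf standing in for write_pbl() (the driver appears to batch a page's worth of u64 entries; BATCH here is shrunk for illustration):

    #include <stdio.h>

    #define BATCH 4 /* stands in for PAGE_SIZE / sizeof(u64) entries */

    /* Stand-in for write_pbl(): write i entries starting at PBL entry off. */
    static void flush(const unsigned long long *pages, int off, int i)
    {
            printf("write_pbl: %d entries at byte offset %d, first 0x%llx\n",
                   i, off << 3, pages[0]);
    }

    int main(void)
    {
            unsigned long long pages[BATCH];
            int total_blocks = 10, n = 0, i = 0;

            for (int blk = 0; blk < total_blocks; blk++) {
                    pages[i++] = 0x1000ULL * blk; /* DMA address of this block */
                    if (i == BATCH) {             /* batch full: push it out */
                            flush(pages, n, i);
                            n += i;
                            i = 0;
                    }
            }
            if (i)                                /* flush the partial tail */
                    flush(pages, n, i);
            return 0;
    }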
607 struct c4iw_mr *mhp;
621 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
622 if (!mhp) {
627 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
628 if (!mhp->wr_waitp) {
632 c4iw_init_wr_wait(mhp->wr_waitp);
634 mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
635 length, &mhp->mpl_addr, GFP_KERNEL);
636 if (!mhp->mpl) {
640 mhp->max_mpl_len = length;
642 mhp->rhp = rhp;
643 ret = alloc_pbl(mhp, max_num_sg);
646 mhp->attr.pbl_size = max_num_sg;
648 mhp->attr.pbl_size, mhp->attr.pbl_addr,
649 mhp->wr_waitp);
652 mhp->attr.pdid = php->pdid;
653 mhp->attr.type = FW_RI_STAG_NSMR;
654 mhp->attr.stag = stag;
655 mhp->attr.state = 0;
657 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
658 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
663 pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
664 return &(mhp->ibmr);
666 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
667 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
669 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
670 mhp->attr.pbl_size << 3);
672 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
673 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
675 c4iw_put_wr_wait(mhp->wr_waitp);
677 kfree(mhp);
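c4iw_alloc_mr() prepares a fast-registration MR: it allocates a DMA-coherent page list (mpl) that c4iw_map_mr_sg() will later fill, reserves a PBL of max_num_sg entries, and writes a TPT entry with state 0, i.e. allocated but not valid until a fast-register work request flips it. The length passed to dma_alloc_coherent() is computed before the matched lines; a plausible sizing sketch, assuming one 64-bit address per SG entry rounded up for the hardware (the exact rounding constant is an assumption, not visible in the matches):

    #include <stdio.h>
    #include <stdint.h>

    /* roundup() in the kernel's sense: next multiple of y at or above x. */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            int max_num_sg = 13;
            size_t length = ROUNDUP(max_num_sg * sizeof(uint64_t), 32);

            printf("max_num_sg %d -> %zu-byte mpl buffer\n", max_num_sg, length);
            return 0;
    }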
684 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
686 if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
689 mhp->mpl[mhp->mpl_len++] = addr;
697 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
699 mhp->mpl_len = 0;
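These two helpers work as a pair: c4iw_map_mr_sg() resets mpl_len and then hands each page of the scatterlist to c4iw_set_page() (via the core's ib_sg_to_pages() callback mechanism); the callback appends addresses until the list reaches attr.pbl_size, after which it returns -ENOMEM and the mapping stops. A toy model of that reset-then-append contract:

    #include <stdio.h>
    #include <stdint.h>

    #define PBL_SIZE 4

    static uint64_t mpl[PBL_SIZE];
    static unsigned int mpl_len;

    /* Mirrors c4iw_set_page(): reject pages once the list is full. */
    static int set_page(uint64_t addr)
    {
            if (mpl_len == PBL_SIZE)
                    return -1;              /* -ENOMEM in the driver */
            mpl[mpl_len++] = addr;
            return 0;
    }

    int main(void)
    {
            int mapped = 0;

            mpl_len = 0;                    /* c4iw_map_mr_sg() resets first */
            for (int i = 0; i < 6; i++)     /* 6 pages offered, 4 slots */
                    if (set_page(0x1000ULL * i) == 0)
                            mapped++;
            printf("mapped %d of 6 pages\n", mapped);
            return 0;
    }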
707 struct c4iw_mr *mhp;
712 mhp = to_c4iw_mr(ib_mr);
713 rhp = mhp->rhp;
714 mmid = mhp->attr.stag >> 8;
716 if (mhp->mpl)
717 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
718 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
719 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
720 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
721 if (mhp->attr.pbl_size)
722 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
723 mhp->attr.pbl_size << 3);
724 if (mhp->kva)
725 kfree((void *) (unsigned long) mhp->kva);
726 ib_umem_release(mhp->umem);
727 pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
728 c4iw_put_wr_wait(mhp->wr_waitp);
729 kfree(mhp);
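c4iw_dereg_mr() is the common teardown for every MR flavor, so each resource is released only if this MR actually owns it: mpl exists only for fast-reg MRs, the PBL only when pbl_size is nonzero (a DMA MR has none), kva only for the legacy kernel-virtual path, and ib_umem_release() tolerates a NULL umem. A sketch of that ownership-sensitive teardown (struct fields illustrative, free() standing in for the kernel release calls):

    #include <stdlib.h>

    struct mr {
            void *mpl;             /* only fast-reg MRs allocate this */
            unsigned int pbl_size; /* zero for DMA MRs, which span all memory */
            void *kva;             /* legacy kernel-virtual mapping, often NULL */
            void *umem;            /* pinned user pages, NULL for kernel MRs */
    };

    static void dereg_mr(struct mr *m)
    {
            if (m->mpl)
                    free(m->mpl);  /* dma_free_coherent() in the driver */
            /* the hardware TPT entry is destroyed here, before the PBL goes */
            if (m->pbl_size) {
                    /* c4iw_pblpool_free() would return the range to the pool */
            }
            if (m->kva)
                    free(m->kva);
            free(m->umem);         /* like ib_umem_release(), NULL-safe */
            free(m);
    }

    int main(void)
    {
            struct mr *m = calloc(1, sizeof(*m));

            if (m)
                    dereg_mr(m);
            return 0;
    }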
735 struct c4iw_mr *mhp;
739 mhp = xa_load(&rhp->mrs, rkey >> 8);
740 if (mhp)
741 mhp->attr.state = 0;
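c4iw_invalidate_mr() handles a local invalidate: it looks the MR up by mmid (rkey >> 8, the same index finish_mem_reg() inserted under) and clears attr.state, leaving the xarray entry in place for a later fast-register to revalidate. The surrounding lock/unlock lines do not mention mhp and are elided here; presumably the load and the flip happen under the xarray's lock. A toy table model of that lookup-and-flip:

    #include <stdio.h>
    #include <stdint.h>

    struct mr { int state; };

    #define TABLE_SIZE 16          /* toy stand-in for the rhp->mrs xarray */
    static struct mr *table[TABLE_SIZE];

    static void invalidate(uint32_t rkey)
    {
            struct mr *m = table[(rkey >> 8) % TABLE_SIZE];

            if (m)
                    m->state = 0;  /* invalid until re-registered */
    }

    int main(void)
    {
            struct mr m = { .state = 1 };

            table[2] = &m;
            invalidate(0x00000234); /* mmid = 0x234 >> 8 = 2 */
            printf("state after invalidate: %d\n", m.state);
            return 0;
    }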