Lines Matching +full:wr +full:- +full:hold

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
4 /* Copyright (c) 2008-2019, IBM Corporation */
57 size_t size = vma->vm_end - vma->vm_start;
60 int rv = -EINVAL;
65 if (vma->vm_start & (PAGE_SIZE - 1)) {
67 return -EINVAL;
69 rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
71 siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
72 vma->vm_pgoff, size);
73 return -EINVAL;
77 rv = remap_vmalloc_range(vma, entry->address, 0);
79 pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
88 struct siw_device *sdev = to_siw_dev(base_ctx->device);
93 if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
94 rv = -ENOMEM;
97 ctx->sdev = sdev;
99 uresp.dev_id = sdev->vendor_part_id;
101 if (udata->outlen < sizeof(uresp)) {
102 rv = -EINVAL;
109 siw_dbg(base_ctx->device, "success. now %d context(s)\n",
110 atomic_read(&sdev->num_ctx));
115 atomic_dec(&sdev->num_ctx);
116 siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
117 atomic_read(&sdev->num_ctx));
126 atomic_dec(&uctx->sdev->num_ctx);
134 if (udata->inlen || udata->outlen)
135 return -EINVAL;
140 attr->atomic_cap = 0;
141 attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS;
142 attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG;
143 attr->max_cq = sdev->attrs.max_cq;
144 attr->max_cqe = sdev->attrs.max_cqe;
145 attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
146 attr->max_mr = sdev->attrs.max_mr;
147 attr->max_mw = sdev->attrs.max_mw;
148 attr->max_mr_size = ~0ull;
149 attr->max_pd = sdev->attrs.max_pd;
150 attr->max_qp = sdev->attrs.max_qp;
151 attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
152 attr->max_qp_rd_atom = sdev->attrs.max_ord;
153 attr->max_qp_wr = sdev->attrs.max_qp_wr;
154 attr->max_recv_sge = sdev->attrs.max_sge;
155 attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
156 attr->max_send_sge = sdev->attrs.max_sge;
157 attr->max_sge_rd = sdev->attrs.max_sge_rd;
158 attr->max_srq = sdev->attrs.max_srq;
159 attr->max_srq_sge = sdev->attrs.max_srq_sge;
160 attr->max_srq_wr = sdev->attrs.max_srq_wr;
161 attr->page_size_cap = PAGE_SIZE;
162 attr->vendor_id = SIW_VENDOR_ID;
163 attr->vendor_part_id = sdev->vendor_part_id;
165 addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
166 sdev->raw_gid);
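The query path above fills struct ib_device_attr from the per-device sdev->attrs limits; kernel consumers normally read these cached values straight from ib_device->attrs rather than issuing a query call. A minimal sketch of such a capability check, with a hypothetical helper name and limits:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: verify the device advertises enough work-request
 * and SGE capacity before a consumer sizes its queues.
 */
static int my_check_dev_limits(struct ib_device *ibdev,
			       int needed_wr, int needed_sge)
{
	const struct ib_device_attr *a = &ibdev->attrs;

	if (a->max_qp_wr < needed_wr || a->max_send_sge < needed_sge)
		return -EINVAL;	/* device too small for this consumer */

	return 0;
}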
179 rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
180 &attr->active_width);
186 return -ENODEV;
188 attr->gid_tbl_len = 1;
189 attr->max_msg_sz = -1;
190 attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
191 attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
192 attr->state = ib_get_curr_port_state(ndev);
193 attr->phys_state = attr->state == IB_PORT_ACTIVE ?
195 attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
199 * attr->lid = 0;
200 * attr->bad_pkey_cntr = 0;
201 * attr->qkey_viol_cntr = 0;
202 * attr->sm_lid = 0;
203 * attr->lmc = 0;
204 * attr->max_vl_num = 0;
205 * attr->sm_sl = 0;
206 * attr->subnet_timeout = 0;
207 * attr->init_type_reply = 0;
222 port_immutable->gid_tbl_len = attr.gid_tbl_len;
223 port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
235 memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
242 struct siw_device *sdev = to_siw_dev(pd->device);
244 if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
245 atomic_dec(&sdev->num_pd);
246 return -ENOMEM;
248 siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));
255 struct siw_device *sdev = to_siw_dev(pd->device);
258 atomic_dec(&sdev->num_pd);
284 entry->address = address;
286 rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
287 &entry->rdma_entry,
294 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
296 return &entry->rdma_entry;
312 struct ib_pd *pd = ibqp->pd;
314 struct ib_device *base_dev = pd->device;
325 if (attrs->create_flags)
326 return -EOPNOTSUPP;
328 if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
330 rv = -ENOMEM;
333 if (attrs->qp_type != IB_QPT_RC) {
335 rv = -EOPNOTSUPP;
338 if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
339 (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
340 (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
341 (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
343 rv = -EINVAL;
346 if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
348 attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
349 rv = -EINVAL;
353 * NOTE: we don't allow for a QP unable to hold any SQ WQE
355 if (attrs->cap.max_send_wr == 0) {
357 rv = -EINVAL;
361 if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
363 rv = -EINVAL;
367 init_rwsem(&qp->state_lock);
368 spin_lock_init(&qp->sq_lock);
369 spin_lock_init(&qp->rq_lock);
370 spin_lock_init(&qp->orq_lock);
382 num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
383 num_rqe = attrs->cap.max_recv_wr;
388 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
390 qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));
392 if (qp->sendq == NULL) {
393 rv = -ENOMEM;
396 if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
397 if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
398 qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
400 rv = -EINVAL;
404 qp->pd = pd;
405 qp->scq = to_siw_cq(attrs->send_cq);
406 qp->rcq = to_siw_cq(attrs->recv_cq);
408 if (attrs->srq) {
414 qp->srq = to_siw_srq(attrs->srq);
415 qp->attrs.rq_size = 0;
417 qp->base_qp.qp_num);
420 qp->recvq =
423 qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));
425 if (qp->recvq == NULL) {
426 rv = -ENOMEM;
429 qp->attrs.rq_size = num_rqe;
431 qp->attrs.sq_size = num_sqe;
432 qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
433 qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;
436 qp->tx_ctx.gso_seg_limit = 1;
437 qp->tx_ctx.zcopy_tx = zcopy_tx;
439 qp->attrs.state = SIW_QP_STATE_IDLE;
448 if (qp->sendq) {
450 qp->sq_entry =
451 siw_mmap_entry_insert(uctx, qp->sendq,
453 if (!qp->sq_entry) {
454 rv = -ENOMEM;
459 if (qp->recvq) {
461 qp->rq_entry =
462 siw_mmap_entry_insert(uctx, qp->recvq,
464 if (!qp->rq_entry) {
466 rv = -ENOMEM;
471 if (udata->outlen < sizeof(uresp)) {
472 rv = -EINVAL;
479 qp->tx_cpu = siw_get_tx_cpu(sdev);
480 if (qp->tx_cpu < 0) {
481 rv = -EINVAL;
484 INIT_LIST_HEAD(&qp->devq);
485 spin_lock_irqsave(&sdev->lock, flags);
486 list_add_tail(&qp->devq, &sdev->qp_list);
487 spin_unlock_irqrestore(&sdev->lock, flags);
489 init_completion(&qp->qp_free);
494 xa_erase(&sdev->qp_xa, qp_id(qp));
496 rdma_user_mmap_entry_remove(qp->sq_entry);
497 rdma_user_mmap_entry_remove(qp->rq_entry);
499 vfree(qp->sendq);
500 vfree(qp->recvq);
503 atomic_dec(&sdev->num_qp);
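The create path above only accepts RC QPs, requires at least one SQ WQE, bounds WR/SGE/inline sizes by SIW_MAX_QP_WR, SIW_MAX_SGE and SIW_MAX_INLINE, and insists on a send CQ plus either a receive CQ or an SRQ. A minimal in-kernel sketch of a caller that stays within those checks; the function name and queue sizes are illustrative, not taken from siw:

#include <rdma/ib_verbs.h>

/* Hypothetical caller: request a small RC QP within the limits that
 * siw_create_qp() validates (non-zero SQ depth, bounded SGEs/inline).
 */
static struct ib_qp *my_make_rc_qp(struct ib_pd *pd, struct ib_cq *scq,
				   struct ib_cq *rcq)
{
	struct ib_qp_init_attr init = {
		.qp_type	= IB_QPT_RC,	/* only type siw accepts */
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.send_cq	= scq,
		.recv_cq	= rcq,
		.cap = {
			.max_send_wr	 = 64,	/* must be > 0 */
			.max_recv_wr	 = 64,
			.max_send_sge	 = 4,
			.max_recv_sge	 = 4,
			.max_inline_data = 64,
		},
	};

	return ib_create_qp(pd, &init);	/* ERR_PTR() on failure */
}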
521 return -EINVAL;
523 ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
525 return -ENODEV;
527 qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
528 qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
529 qp_attr->cap.max_send_wr = qp->attrs.sq_size;
530 qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
531 qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
532 qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
533 qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
534 qp_attr->max_rd_atomic = qp->attrs.irq_size;
535 qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
537 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
541 qp_init_attr->qp_type = base_qp->qp_type;
542 qp_init_attr->send_cq = base_qp->send_cq;
543 qp_init_attr->recv_cq = base_qp->recv_cq;
544 qp_init_attr->srq = base_qp->srq;
546 qp_init_attr->cap = qp_attr->cap;
564 return -EOPNOTSUPP;
571 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
573 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
575 if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
580 ib_qp_state_to_string[attr->qp_state]);
582 new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];
585 qp->tx_ctx.tx_suspend = 1;
592 down_write(&qp->state_lock);
596 up_write(&qp->state_lock);
609 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
615 qp->attrs.flags |= SIW_QP_IN_DESTROY;
616 qp->rx_stream.rx_suspend = 1;
619 rdma_user_mmap_entry_remove(qp->sq_entry);
620 rdma_user_mmap_entry_remove(qp->rq_entry);
623 down_write(&qp->state_lock);
628 if (qp->cep) {
629 siw_cep_put(qp->cep);
630 qp->cep = NULL;
632 up_write(&qp->state_lock);
634 qp->scq = qp->rcq = NULL;
637 wait_for_completion(&qp->qp_free);
654 struct ib_sge *core_sge = core_wr->sg_list;
655 void *kbuf = &sqe->sge[1];
656 int num_sge = core_wr->num_sge, bytes = 0;
658 sqe->sge[0].laddr = (uintptr_t)kbuf;
659 sqe->sge[0].lkey = 0;
661 while (num_sge--) {
662 if (!core_sge->length) {
666 bytes += core_sge->length;
668 bytes = -EINVAL;
671 memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
672 core_sge->length);
674 kbuf += core_sge->length;
677 sqe->sge[0].length = max(bytes, 0);
678 sqe->num_sge = bytes > 0 ? 1 : 0;
683 /* Complete SQ WR's without processing */
684 static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
689 while (wr) {
692 switch (wr->opcode) {
718 rv = -EINVAL;
722 sqe.id = wr->wr_id;
728 *bad_wr = wr;
731 wr = wr->next;
736 /* Complete RQ WR's without processing */
737 static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
743 while (wr) {
744 rqe.id = wr->wr_id;
748 *bad_wr = wr;
751 wr = wr->next;
759 * Post a list of S-WR's to a SQ.
762 * @wr: Null terminated list of user WR's
763 * @bad_wr: Points to failing WR in case of synchronous failure.
765 int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
774 if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
775 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
776 *bad_wr = wr;
777 return -EINVAL;
781 * Try to acquire QP state lock. Must be non-blocking
784 if (!down_read_trylock(&qp->state_lock)) {
785 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
795 rv = siw_sq_flush_wr(qp, wr, bad_wr);
798 qp->attrs.state);
799 *bad_wr = wr;
800 rv = -ENOTCONN;
804 if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
805 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
807 * Immediately flush this WR to CQ, if QP
809 * be empty, so WR completes in-order.
813 rv = siw_sq_flush_wr(qp, wr, bad_wr);
816 qp->attrs.state);
817 *bad_wr = wr;
818 rv = -ENOTCONN;
820 up_read(&qp->state_lock);
823 spin_lock_irqsave(&qp->sq_lock, flags);
825 while (wr) {
826 u32 idx = qp->sq_put % qp->attrs.sq_size;
827 struct siw_sqe *sqe = &qp->sendq[idx];
829 if (sqe->flags) {
831 rv = -ENOMEM;
834 if (wr->num_sge > qp->attrs.sq_max_sges) {
835 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
836 rv = -EINVAL;
839 sqe->id = wr->wr_id;
841 if ((wr->send_flags & IB_SEND_SIGNALED) ||
842 (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
843 sqe->flags |= SIW_WQE_SIGNALLED;
845 if (wr->send_flags & IB_SEND_FENCE)
846 sqe->flags |= SIW_WQE_READ_FENCE;
848 switch (wr->opcode) {
851 if (wr->send_flags & IB_SEND_SOLICITED)
852 sqe->flags |= SIW_WQE_SOLICITED;
854 if (!(wr->send_flags & IB_SEND_INLINE)) {
855 siw_copy_sgl(wr->sg_list, sqe->sge,
856 wr->num_sge);
857 sqe->num_sge = wr->num_sge;
859 rv = siw_copy_inline_sgl(wr, sqe);
861 rv = -EINVAL;
864 sqe->flags |= SIW_WQE_INLINE;
865 sqe->num_sge = 1;
867 if (wr->opcode == IB_WR_SEND)
868 sqe->opcode = SIW_OP_SEND;
870 sqe->opcode = SIW_OP_SEND_REMOTE_INV;
871 sqe->rkey = wr->ex.invalidate_rkey;
881 * a private per-rreq tag referring to a checked
884 if (unlikely(wr->num_sge != 1)) {
885 rv = -EINVAL;
888 siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
892 sqe->raddr = rdma_wr(wr)->remote_addr;
893 sqe->rkey = rdma_wr(wr)->rkey;
894 sqe->num_sge = 1;
896 if (wr->opcode == IB_WR_RDMA_READ)
897 sqe->opcode = SIW_OP_READ;
899 sqe->opcode = SIW_OP_READ_LOCAL_INV;
903 if (!(wr->send_flags & IB_SEND_INLINE)) {
904 siw_copy_sgl(wr->sg_list, &sqe->sge[0],
905 wr->num_sge);
906 sqe->num_sge = wr->num_sge;
908 rv = siw_copy_inline_sgl(wr, sqe);
910 rv = -EINVAL;
913 sqe->flags |= SIW_WQE_INLINE;
914 sqe->num_sge = 1;
916 sqe->raddr = rdma_wr(wr)->remote_addr;
917 sqe->rkey = rdma_wr(wr)->rkey;
918 sqe->opcode = SIW_OP_WRITE;
922 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
923 sqe->rkey = reg_wr(wr)->key;
924 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
925 sqe->opcode = SIW_OP_REG_MR;
929 sqe->rkey = wr->ex.invalidate_rkey;
930 sqe->opcode = SIW_OP_INVAL_STAG;
934 siw_dbg_qp(qp, "ib wr type %d unsupported\n",
935 wr->opcode);
936 rv = -EINVAL;
940 sqe->opcode, sqe->flags,
941 (void *)(uintptr_t)sqe->id);
948 sqe->flags |= SIW_WQE_VALID;
950 qp->sq_put++;
951 wr = wr->next;
961 if (wqe->wr_status != SIW_WR_IDLE) {
962 spin_unlock_irqrestore(&qp->sq_lock, flags);
966 spin_unlock_irqrestore(&qp->sq_lock, flags);
971 if (rdma_is_kernel_res(&qp->base_qp.res)) {
974 qp->tx_ctx.in_syscall = 1;
976 if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
979 qp->tx_ctx.in_syscall = 0;
983 up_read(&qp->state_lock);
992 *bad_wr = wr;
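As the flow above shows, each posted WR is copied into a free slot of the shared send queue before processing is kicked, and *bad_wr is set to the first WR that could not be queued. The caller-facing contract is the usual ib_post_send() one; a hedged sketch of a kernel consumer posting one signaled SEND, with illustrative buffer and lkey handling:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: post a single signaled SEND of a kernel buffer
 * that was already DMA-mapped; *bad_wr names the failing WR on error.
 */
static int my_post_send(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,		/* e.g. pd->local_dma_lkey */
	};
	struct ib_send_wr wr = {
		.wr_id	    = 1,	/* cookie returned in the CQE */
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode	    = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}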
999 * Post a list of R-WR's to a RQ.
1002 * @wr: Null terminated list of user WR's
1003 * @bad_wr: Points to failing WR in case of synchronous failure.
1005 int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
1012 if (qp->srq || qp->attrs.rq_size == 0) {
1013 *bad_wr = wr;
1014 return -EINVAL;
1016 if (!rdma_is_kernel_res(&qp->base_qp.res)) {
1018 *bad_wr = wr;
1019 return -EINVAL;
1023 * Try to acquire QP state lock. Must be non-blocking
1026 if (!down_read_trylock(&qp->state_lock)) {
1027 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
1037 rv = siw_rq_flush_wr(qp, wr, bad_wr);
1040 qp->attrs.state);
1041 *bad_wr = wr;
1042 rv = -ENOTCONN;
1046 if (qp->attrs.state > SIW_QP_STATE_RTS) {
1047 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
1049 * Immediately flush this WR to CQ, if QP
1051 * be empty, so WR completes in-order.
1055 rv = siw_rq_flush_wr(qp, wr, bad_wr);
1058 qp->attrs.state);
1059 *bad_wr = wr;
1060 rv = -ENOTCONN;
1062 up_read(&qp->state_lock);
1069 spin_lock_irqsave(&qp->rq_lock, flags);
1071 while (wr) {
1072 u32 idx = qp->rq_put % qp->attrs.rq_size;
1073 struct siw_rqe *rqe = &qp->recvq[idx];
1075 if (rqe->flags) {
1077 rv = -ENOMEM;
1080 if (wr->num_sge > qp->attrs.rq_max_sges) {
1081 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
1082 rv = -EINVAL;
1085 rqe->id = wr->wr_id;
1086 rqe->num_sge = wr->num_sge;
1087 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
1092 rqe->flags = SIW_WQE_VALID;
1094 qp->rq_put++;
1095 wr = wr->next;
1097 spin_unlock_irqrestore(&qp->rq_lock, flags);
1099 up_read(&qp->state_lock);
1103 *bad_wr = wr;
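Receive posting mirrors the send side: each WR lands in a free RQE slot, and only kernel QPs may use this path, since user QPs fill their mmapped RQ directly. A minimal sketch of the corresponding ib_post_recv() call, under the same illustrative assumptions as the send example above:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: replenish one receive buffer on a kernel QP. */
static int my_post_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
{
	struct ib_sge sge = {
		.addr	= dma_addr,
		.length	= len,
		.lkey	= lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id	 = 2,		/* matched against the RQ CQE */
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}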
1111 struct siw_device *sdev = to_siw_dev(base_cq->device);
1121 rdma_user_mmap_entry_remove(cq->cq_entry);
1123 atomic_dec(&sdev->num_cq);
1125 vfree(cq->queue);
1142 struct ib_udata *udata = &attrs->driver_udata;
1143 struct siw_device *sdev = to_siw_dev(base_cq->device);
1145 int rv, size = attr->cqe;
1147 if (attr->flags)
1148 return -EOPNOTSUPP;
1150 if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
1151 siw_dbg(base_cq->device, "too many CQ's\n");
1152 rv = -ENOMEM;
1155 if (size < 1 || size > sdev->attrs.max_cqe) {
1156 siw_dbg(base_cq->device, "CQ size error: %d\n", size);
1157 rv = -EINVAL;
1161 cq->base_cq.cqe = size;
1162 cq->num_cqe = size;
1165 cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
1168 cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
1171 if (cq->queue == NULL) {
1172 rv = -ENOMEM;
1175 get_random_bytes(&cq->id, 4);
1176 siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);
1178 spin_lock_init(&cq->lock);
1180 cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
1190 cq->cq_entry =
1191 siw_mmap_entry_insert(ctx, cq->queue,
1193 if (!cq->cq_entry) {
1194 rv = -ENOMEM;
1198 uresp.cq_id = cq->id;
1201 if (udata->outlen < sizeof(uresp)) {
1202 rv = -EINVAL;
1212 siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
1214 if (cq->queue) {
1219 rdma_user_mmap_entry_remove(cq->cq_entry);
1220 vfree(cq->queue);
1222 atomic_dec(&sdev->num_cq);
1277 smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
1283 smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
1286 return cq->cq_put - cq->cq_get;
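siw_req_notify_cq() arms the shared notify word for solicited-only or all completions and can report already-pending CQEs via cq_put - cq_get. A consumer that polls its CQ directly typically pairs arming with a drain-then-rearm loop; a minimal sketch, with a hypothetical handler name:

#include <rdma/ib_verbs.h>

/* Hypothetical completion handler: drain the CQ, then re-arm and drain
 * once more to close the race with completions that arrived between the
 * last poll and the re-arm.
 */
static void my_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* handle wc.status / wc.wr_id here */

	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		while (ib_poll_cq(cq, 1, &wc) > 0)
			;	/* completions raced with the re-arm */
}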
1302 struct siw_device *sdev = to_siw_dev(base_mr->device);
1304 siw_dbg_mem(mr->mem, "deregister MR\n");
1306 atomic_dec(&sdev->num_mr);
1332 struct siw_device *sdev = to_siw_dev(pd->device);
1339 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1341 rv = -ENOMEM;
1345 rv = -EINVAL;
1348 umem = siw_umem_get(pd->device, start, len, rights);
1357 rv = -ENOMEM;
1366 struct siw_mem *mem = mr->mem;
1368 if (udata->inlen < sizeof(ureq)) {
1369 rv = -EINVAL;
1376 mr->base_mr.lkey |= ureq.stag_key;
1377 mr->base_mr.rkey |= ureq.stag_key;
1378 mem->stag |= ureq.stag_key;
1379 uresp.stag = mem->stag;
1381 if (udata->outlen < sizeof(uresp)) {
1382 rv = -EINVAL;
1389 mr->mem->stag_valid = 1;
1391 return &mr->base_mr;
1394 atomic_dec(&sdev->num_mr);
1396 if (mr->mem)
1409 struct siw_device *sdev = to_siw_dev(pd->device);
1414 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1416 rv = -ENOMEM;
1421 rv = -EOPNOTSUPP;
1426 rv = -ENOMEM;
1438 rv = -ENOMEM;
1445 mr->mem->is_pbl = 1;
1447 siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1449 return &mr->base_mr;
1452 atomic_dec(&sdev->num_mr);
1457 if (mr->mem)
1477 struct siw_mem *mem = mr->mem;
1478 struct siw_pbl *pbl = mem->pbl;
1485 return -EINVAL;
1487 pble = pbl->pbe;
1489 if (pbl->max_buf < num_sle) {
1491 num_sle, pbl->max_buf);
1492 return -ENOMEM;
1497 return -EINVAL;
1500 pble->addr = sg_dma_address(slp);
1501 pble->size = sg_dma_len(slp);
1502 pble->pbl_off = 0;
1503 pbl_size = pble->size;
1504 pbl->num_buf = 1;
1507 if (pble->addr + pble->size == sg_dma_address(slp)) {
1508 pble->size += sg_dma_len(slp);
1511 pbl->num_buf++;
1512 pble->addr = sg_dma_address(slp);
1513 pble->size = sg_dma_len(slp);
1514 pble->pbl_off = pbl_size;
1520 i, pble->size, ib_virt_dma_to_ptr(pble->addr),
1525 mem->len = base_mr->length;
1526 mem->va = base_mr->iova;
1529 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1530 pbl->num_buf);
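siw_map_mr_sg() walks the DMA-mapped scatterlist and coalesces contiguous entries into PBL elements, to be consumed by a later IB_WR_REG_MR work request. From a consumer's view the sequence is: allocate a fast-registration MR, map the scatterlist into it, post the registration WR. A hedged in-kernel sketch; scatterlist preparation and rkey management are omitted or illustrative:

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Hypothetical fast-registration flow: mr was allocated with
 * ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, max_sge) and sg/sg_nents describe
 * an already DMA-mapped buffer.
 */
static int my_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
		       struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg = {
		.wr = {
			.opcode	    = IB_WR_REG_MR,
			.send_flags = IB_SEND_SIGNALED,
		},
		.mr	= mr,
		.key	= mr->rkey,
		.access	= IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE,
	};
	const struct ib_send_wr *bad_wr;
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents)
		return n < 0 ? n : -EINVAL;	/* could not map all entries */

	return ib_post_send(qp, &reg.wr, &bad_wr);
}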
1542 struct siw_device *sdev = to_siw_dev(pd->device);
1546 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1548 rv = -ENOMEM;
1553 rv = -ENOMEM;
1560 mr->mem->stag_valid = 1;
1562 siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1564 return &mr->base_mr;
1570 atomic_dec(&sdev->num_mr);
1589 struct ib_srq_attr *attrs = &init_attrs->attr;
1590 struct siw_device *sdev = to_siw_dev(base_srq->device);
1596 if (init_attrs->srq_type != IB_SRQT_BASIC)
1597 return -EOPNOTSUPP;
1599 if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
1600 siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
1601 rv = -ENOMEM;
1604 if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
1605 attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
1606 rv = -EINVAL;
1609 srq->max_sge = attrs->max_sge;
1610 srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
1611 srq->limit = attrs->srq_limit;
1612 if (srq->limit)
1613 srq->armed = true;
1615 srq->is_kernel_res = !udata;
1618 srq->recvq =
1619 vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
1621 srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe));
1623 if (srq->recvq == NULL) {
1624 rv = -ENOMEM;
1629 size_t length = srq->num_rqe * sizeof(struct siw_rqe);
1631 srq->srq_entry =
1632 siw_mmap_entry_insert(ctx, srq->recvq,
1634 if (!srq->srq_entry) {
1635 rv = -ENOMEM;
1639 uresp.num_rqe = srq->num_rqe;
1641 if (udata->outlen < sizeof(uresp)) {
1642 rv = -EINVAL;
1649 spin_lock_init(&srq->lock);
1651 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1656 if (srq->recvq) {
1658 rdma_user_mmap_entry_remove(srq->srq_entry);
1659 vfree(srq->recvq);
1661 atomic_dec(&sdev->num_srq);
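siw_create_srq() rounds the requested depth up to a power of two and arms the low-watermark event whenever a non-zero srq_limit is given; siw_modify_srq() later re-arms it when IB_SRQ_LIMIT is passed. A minimal sketch of a kernel consumer creating such an SRQ and re-arming the limit; the sizes and function names are illustrative:

#include <rdma/ib_verbs.h>

/* Hypothetical SRQ setup: ask for a basic SRQ with a low watermark of
 * 16 outstanding RQEs, then re-arm the limit after the event fires.
 */
static struct ib_srq *my_make_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init = {
		.srq_type = IB_SRQT_BASIC,	/* only type siw supports */
		.attr = {
			.max_wr	   = 256,
			.max_sge   = 4,
			.srq_limit = 16,	/* non-zero arms the event */
		},
	};

	return ib_create_srq(pd, &init);	/* ERR_PTR() on failure */
}

static int my_rearm_srq_limit(struct ib_srq *srq)
{
	struct ib_srq_attr attr = { .srq_limit = 16 };

	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}

Buffers are then replenished with ib_post_srq_recv(), which siw restricts to kernel consumers in the same way as the RQ path above.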
1673 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
1682 spin_lock_irqsave(&srq->lock, flags);
1686 rv = -EOPNOTSUPP;
1690 if (attrs->srq_limit) {
1691 if (unlikely(attrs->srq_limit > srq->num_rqe)) {
1692 rv = -EINVAL;
1695 srq->armed = true;
1697 srq->armed = false;
1699 srq->limit = attrs->srq_limit;
1702 spin_unlock_irqrestore(&srq->lock, flags);
1717 spin_lock_irqsave(&srq->lock, flags);
1719 attrs->max_wr = srq->num_rqe;
1720 attrs->max_sge = srq->max_sge;
1721 attrs->srq_limit = srq->limit;
1723 spin_unlock_irqrestore(&srq->lock, flags);
1733 * QP anymore - the code trusts the RDMA core environment to keep track
1739 struct siw_device *sdev = to_siw_dev(base_srq->device);
1745 rdma_user_mmap_entry_remove(srq->srq_entry);
1746 vfree(srq->recvq);
1747 atomic_dec(&sdev->num_srq);
1760 * @wr: List of R-WR's
1761 * @bad_wr: Updated to failing WR if posting fails.
1763 int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1770 if (unlikely(!srq->is_kernel_res)) {
1771 siw_dbg_pd(base_srq->pd,
1773 rv = -EINVAL;
1781 spin_lock_irqsave(&srq->lock, flags);
1783 while (wr) {
1784 u32 idx = srq->rq_put % srq->num_rqe;
1785 struct siw_rqe *rqe = &srq->recvq[idx];
1787 if (rqe->flags) {
1788 siw_dbg_pd(base_srq->pd, "SRQ full\n");
1789 rv = -ENOMEM;
1792 if (unlikely(wr->num_sge > srq->max_sge)) {
1793 siw_dbg_pd(base_srq->pd,
1794 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1795 rv = -EINVAL;
1798 rqe->id = wr->wr_id;
1799 rqe->num_sge = wr->num_sge;
1800 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
1802 /* Make sure S-RQE is completely written before valid */
1805 rqe->flags = SIW_WQE_VALID;
1807 srq->rq_put++;
1808 wr = wr->next;
1810 spin_unlock_irqrestore(&srq->lock, flags);
1813 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1814 *bad_wr = wr;
1822 struct ib_qp *base_qp = &qp->base_qp;
1828 if (qp->attrs.flags & SIW_QP_IN_DESTROY)
1832 event.device = base_qp->device;
1835 if (base_qp->event_handler) {
1837 base_qp->event_handler(&event, base_qp->qp_context);
1844 struct ib_cq *base_cq = &cq->base_cq;
1847 event.device = base_cq->device;
1850 if (base_cq->event_handler) {
1852 base_cq->event_handler(&event, base_cq->cq_context);
1859 struct ib_srq *base_srq = &srq->base_srq;
1862 event.device = base_srq->device;
1865 if (base_srq->event_handler) {
1866 siw_dbg_pd(srq->base_srq.pd,
1868 base_srq->event_handler(&event, base_srq->srq_context);
1877 event.device = &sdev->base_dev;
1880 siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);