Lines Matching +full:icid +full:- +full:base

2  * Copyright (c) 2015-2016  QLogic Corporation
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
32 #include <linux/dma-mapping.h>
52 #include <rdma/qedr-abi.h>
70 size_t min_len = min_t(size_t, len, udata->outlen);
78 return -EINVAL;
89 memset(sgid->raw, 0, sizeof(sgid->raw));
90 ether_addr_copy(sgid->raw, dev->ndev->dev_addr);
93 sgid->global.interface_id, sgid->global.subnet_prefix);
100 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
101 struct qedr_device_attr *qattr = &dev->attr;
104 srq_attr->srq_limit = srq->srq_limit;
105 srq_attr->max_wr = qattr->max_srq_wr;
106 srq_attr->max_sge = qattr->max_sge;
115 struct qedr_device_attr *qattr = &dev->attr;
117 if (!dev->rdma_ctx) {
120 dev->rdma_ctx);
121 return -EINVAL;
126 attr->fw_ver = qattr->fw_ver;
127 attr->sys_image_guid = qattr->sys_image_guid;
128 attr->max_mr_size = qattr->max_mr_size;
129 attr->page_size_cap = qattr->page_size_caps;
130 attr->vendor_id = qattr->vendor_id;
131 attr->vendor_part_id = qattr->vendor_part_id;
132 attr->hw_ver = qattr->hw_ver;
133 attr->max_qp = qattr->max_qp;
134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
138 attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
140 if (!rdma_protocol_iwarp(&dev->ibdev, 1))
141 attr->device_cap_flags |= IB_DEVICE_XRC;
142 attr->max_send_sge = qattr->max_sge;
143 attr->max_recv_sge = qattr->max_sge;
144 attr->max_sge_rd = qattr->max_sge;
145 attr->max_cq = qattr->max_cq;
146 attr->max_cqe = qattr->max_cqe;
147 attr->max_mr = qattr->max_mr;
148 attr->max_mw = qattr->max_mw;
149 attr->max_pd = qattr->max_pd;
150 attr->atomic_cap = dev->atomic_cap;
151 attr->max_qp_init_rd_atom =
152 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
153 attr->max_qp_rd_atom =
154 min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
155 attr->max_qp_init_rd_atom);
157 attr->max_srq = qattr->max_srq;
158 attr->max_srq_sge = qattr->max_srq_sge;
159 attr->max_srq_wr = qattr->max_srq_wr;
161 attr->local_ca_ack_delay = qattr->dev_ack_delay;
162 attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
163 attr->max_pkeys = qattr->max_pkey;
164 attr->max_ah = qattr->max_ah;
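
The rd_atomic limits at lines 152-155 are rounded down to a power of two with fls(). A minimal sketch of that rounding, with a purely illustrative helper name (not a driver function):

static inline u32 round_down_pow2(u32 x)	/* illustrative helper, not in the driver */
{
	/* fls() returns the 1-based index of the most significant set bit,
	 * so 1 << (fls(x) - 1) keeps only that bit, e.g. x = 24 -> 16. */
	return x ? 1U << (fls(x) - 1) : 0;
}
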
222 if (!dev->rdma_ctx) {
224 return -EINVAL;
227 rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
230 if (rdma_port->port_state == QED_RDMA_PORT_UP) {
231 attr->state = IB_PORT_ACTIVE;
232 attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
234 attr->state = IB_PORT_DOWN;
235 attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
237 attr->max_mtu = IB_MTU_4096;
238 attr->lid = 0;
239 attr->lmc = 0;
240 attr->sm_lid = 0;
241 attr->sm_sl = 0;
242 attr->ip_gids = true;
243 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
244 attr->active_mtu = iboe_get_mtu(dev->iwarp_max_mtu);
245 attr->gid_tbl_len = 1;
247 attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
248 attr->gid_tbl_len = QEDR_MAX_SGID;
249 attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
251 attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
252 attr->qkey_viol_cntr = 0;
253 get_link_speed_and_width(rdma_port->link_speed,
254 &attr->active_speed, &attr->active_width);
255 attr->max_msg_sz = rdma_port->max_msg_size;
256 attr->max_vl_num = 4;
263 struct ib_device *ibdev = uctx->device;
273 return -EFAULT;
275 if (udata->inlen) {
277 min(sizeof(ureq), udata->inlen));
280 return -EFAULT;
282 ctx->edpm_mode = !!(ureq.context_flags &
284 ctx->db_rec = !!(ureq.context_flags & QEDR_ALLOC_UCTX_DB_REC);
287 rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
295 ctx->dpi = oparams.dpi;
296 ctx->dpi_addr = oparams.dpi_addr;
297 ctx->dpi_phys_addr = oparams.dpi_phys_addr;
298 ctx->dpi_size = oparams.dpi_size;
301 rc = -ENOMEM;
305 entry->io_address = ctx->dpi_phys_addr;
306 entry->length = ctx->dpi_size;
307 entry->mmap_flag = QEDR_USER_MMAP_IO_WC;
308 entry->dpi = ctx->dpi;
309 entry->dev = dev;
310 rc = rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry,
311 ctx->dpi_size);
316 ctx->db_mmap_entry = &entry->rdma_entry;
318 if (!dev->user_dpm_enabled)
320 else if (rdma_protocol_iwarp(&dev->ibdev, 1))
336 uresp.db_pa = rdma_user_mmap_get_offset(ctx->db_mmap_entry);
337 uresp.db_size = ctx->dpi_size;
338 uresp.max_send_wr = dev->attr.max_sqe;
339 uresp.max_recv_wr = dev->attr.max_rqe;
340 uresp.max_srq_wr = dev->attr.max_srq_wr;
343 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
350 ctx->dev = dev;
353 &ctx->ibucontext);
357 if (!ctx->db_mmap_entry)
358 dev->ops->rdma_remove_user(dev->rdma_ctx, ctx->dpi);
360 rdma_user_mmap_entry_remove(ctx->db_mmap_entry);
369 DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
372 rdma_user_mmap_entry_remove(uctx->db_mmap_entry);
378 struct qedr_dev *dev = entry->dev;
380 if (entry->mmap_flag == QEDR_USER_MMAP_PHYS_PAGE)
381 free_page((unsigned long)entry->address);
382 else if (entry->mmap_flag == QEDR_USER_MMAP_IO_WC)
383 dev->ops->rdma_remove_user(dev->rdma_ctx, entry->dpi);
390 struct ib_device *dev = ucontext->device;
391 size_t length = vma->vm_end - vma->vm_start;
399 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
404 vma->vm_pgoff);
405 return -EINVAL;
410 entry->io_address, length, entry->mmap_flag);
412 switch (entry->mmap_flag) {
414 pfn = entry->io_address >> PAGE_SHIFT;
416 pgprot_writecombine(vma->vm_page_prot),
420 rc = vm_insert_page(vma, vma->vm_start,
421 virt_to_page(entry->address));
424 rc = -EINVAL;
430 entry->io_address, length, entry->mmap_flag, rc);
438 struct ib_device *ibdev = ibpd->device;
447 if (!dev->rdma_ctx) {
449 return -EINVAL;
452 rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
456 pd->pd_id = pd_id;
468 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
472 pd->uctx = context;
473 pd->uctx->pd = pd;
481 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
484 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
485 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
492 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
495 return dev->ops->rdma_alloc_xrcd(dev->rdma_ctx, &xrcd->xrcd_id);
500 struct qedr_dev *dev = get_qedr_dev(ibxrcd->device);
501 u16 xrcd_id = get_qedr_xrcd(ibxrcd)->xrcd_id;
503 dev->ops->rdma_dealloc_xrcd(dev->rdma_ctx, xrcd_id);
509 struct pci_dev *pdev = dev->pdev;
512 for (i = 0; i < pbl_info->num_pbls; i++) {
515 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
533 struct pci_dev *pdev = dev->pdev;
540 pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
542 return ERR_PTR(-ENOMEM);
544 for (i = 0; i < pbl_info->num_pbls; i++) {
545 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
554 /* Two-Layer PBLs, if we have more than one pbl we need to initialize
558 for (i = 0; i < pbl_info->num_pbls - 1; i++)
564 for (i--; i >= 0; i--)
565 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
570 return ERR_PTR(-ENOMEM);
585 return -EINVAL;
601 pbl_info->two_layered = true;
607 pbl_info->two_layered = false;
610 pbl_info->num_pbls = num_pbls;
611 pbl_info->pbl_size = pbl_size;
612 pbl_info->num_pbes = num_pbes;
616 pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
630 if (!pbl_info->num_pbes)
636 if (pbl_info->two_layered)
641 pbe = (struct regpair *)pbl_tbl->va;
652 pbe->lo = cpu_to_le32(pg_addr);
653 pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
659 if (total_num_pbes == pbl_info->num_pbes)
664 if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
666 pbe = (struct regpair *)pbl_tbl->va;
683 return dev->ops->common->db_recovery_add(dev->cdev, db_addr, db_data,
699 dev->ops->common->db_recovery_del(dev->cdev, db_addr, db_data);
712 uresp.icid = cq->icid;
713 if (cq->q.db_mmap_entry)
715 rdma_user_mmap_get_offset(cq->q.db_mmap_entry);
719 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
726 if (cq->latest_cqe == cq->toggle_cqe)
727 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
729 cq->latest_cqe = qed_chain_consume(&cq->pbl);
753 /* Skip doorbell recovery for user queues that have no doorbell (e.g. SRQ) or when the user lib does not support it */
754 if (requires_db_rec == 0 || !uctx->db_rec)
758 q->db_rec_data = (void *)get_zeroed_page(GFP_USER);
759 if (!q->db_rec_data) {
761 return -ENOMEM;
768 entry->address = q->db_rec_data;
769 entry->length = PAGE_SIZE;
770 entry->mmap_flag = QEDR_USER_MMAP_PHYS_PAGE;
771 rc = rdma_user_mmap_entry_insert(&uctx->ibucontext,
772 &entry->rdma_entry,
777 q->db_mmap_entry = &entry->rdma_entry;
785 free_page((unsigned long)q->db_rec_data);
786 q->db_rec_data = NULL;
787 return -ENOMEM;
800 q->buf_addr = buf_addr;
801 q->buf_len = buf_len;
802 q->umem = ib_umem_get(&dev->ibdev, q->buf_addr, q->buf_len, access);
803 if (IS_ERR(q->umem)) {
805 PTR_ERR(q->umem));
806 return PTR_ERR(q->umem);
809 fw_pages = ib_umem_num_dma_blocks(q->umem, 1 << FW_PAGE_SHIFT);
810 rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
815 q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
816 if (IS_ERR(q->pbl_tbl)) {
817 rc = PTR_ERR(q->pbl_tbl);
820 qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
823 q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL);
824 if (!q->pbl_tbl) {
825 rc = -ENOMEM;
834 ib_umem_release(q->umem);
835 q->umem = NULL;
849 params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
850 params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
851 params->cnq_id = vector;
852 params->cq_size = chain_entries - 1;
853 params->dpi = (ctx) ? ctx->dpi : dev->dpi;
854 params->pbl_num_pages = page_cnt;
855 params->pbl_ptr = pbl_ptr;
856 params->pbl_two_level = 0;
861 cq->db.data.agg_flags = flags;
862 cq->db.data.value = cpu_to_le32(cons);
863 writeq(cq->db.raw, cq->db_addr);
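
A minimal sketch of the doorbell pattern at lines 861-863 and 1016-1018: the icid, params, aggregation flags and consumer value are filled in as fields of a union and the whole image is posted with one 64-bit write. The field names appear in the listing; the order and widths below are assumptions, not the driver's real layout.

union cq_db_image {			/* hypothetical layout, for illustration only */
	struct {
		__le16 icid;		/* which CQ the doorbell targets */
		u8 agg_flags;		/* arm / aggregation command flags */
		u8 params;
		__le32 value;		/* latest consumer index */
	} data;
	u64 raw;
};

static void ring_cq_doorbell(void __iomem *db_addr, union cq_db_image db)
{
	writeq(db.raw, db_addr);	/* single 64-bit doorbell write */
}
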
872 dev = get_qedr_dev(ibcq->device);
874 if (cq->destroyed) {
876 "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
877 cq, cq->icid);
878 return -EINVAL;
882 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
885 spin_lock_irqsave(&cq->cq_lock, sflags);
887 cq->arm_flags = 0;
890 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
893 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
895 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
897 spin_unlock_irqrestore(&cq->cq_lock, sflags);
905 struct ib_udata *udata = &attrs->driver_udata;
906 struct ib_device *ibdev = ibcq->device;
920 int vector = attr->comp_vector;
921 int entries = attr->cqe;
927 u16 icid;
934 if (attr->flags)
935 return -EOPNOTSUPP;
941 return -EINVAL;
948 /* Calculate the doorbell offset: userspace adds the DPI base, the kernel adds its doorbell base address */
953 udata->inlen))) {
965 cq->cq_type = QEDR_CQ_TYPE_USER;
967 rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
973 pbl_ptr = cq->q.pbl_tbl->pa;
974 page_cnt = cq->q.pbl_info.num_pbes;
976 cq->ibcq.cqe = chain_entries;
977 cq->q.db_addr = ctx->dpi_addr + db_offset;
979 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
981 rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
986 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
987 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
988 cq->ibcq.cqe = cq->pbl.capacity;
994 rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
998 cq->icid = icid;
999 cq->sig = QEDR_CQ_MAGIC_NUMBER;
1000 spin_lock_init(&cq->cq_lock);
1007 rc = qedr_db_recovery_add(dev, cq->q.db_addr,
1008 &cq->q.db_rec_data->db_data,
1016 cq->db.data.icid = cq->icid;
1017 cq->db_addr = dev->db_addr + db_offset;
1018 cq->db.data.params = DB_AGG_CMD_MAX <<
1022 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
1023 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
1024 cq->latest_cqe = NULL;
1026 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
1028 rc = qedr_db_recovery_add(dev, cq->db_addr, &cq->db.data,
1035 "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
1036 cq->icid, cq, params.cq_size);
1041 destroy_iparams.icid = cq->icid;
1042 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
1046 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1047 ib_umem_release(cq->q.umem);
1048 if (cq->q.db_mmap_entry)
1049 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1051 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1054 return -EINVAL;
1062 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
1068 DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
1070 cq->destroyed = 1;
1073 if (cq->cq_type == QEDR_CQ_TYPE_GSI) {
1074 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1078 iparams.icid = cq->icid;
1079 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
1080 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
1083 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
1084 ib_umem_release(cq->q.umem);
1086 if (cq->q.db_rec_data) {
1087 qedr_db_recovery_del(dev, cq->q.db_addr,
1088 &cq->q.db_rec_data->db_data);
1089 rdma_user_mmap_entry_remove(cq->q.db_mmap_entry);
1092 qedr_db_recovery_del(dev, cq->db_addr, &cq->db.data);
1095 /* We don't want the IRQ handler to handle a non-existing CQ, so we wait for any outstanding CNQ notifications to drain before tearing it down
1107 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1109 iter--;
1113 while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
1115 iter--;
1134 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
1139 gid_attr = grh->sgid_attr;
1140 ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
1147 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1148 sizeof(qp_params->sgid));
1149 memcpy(&qp_params->dgid.bytes[0],
1150 &grh->dgid,
1151 sizeof(qp_params->dgid));
1152 qp_params->roce_mode = ROCE_V2_IPV6;
1153 SET_FIELD(qp_params->modify_flags,
1157 memcpy(&qp_params->sgid.bytes[0], &gid_attr->gid.raw[0],
1158 sizeof(qp_params->sgid));
1159 memcpy(&qp_params->dgid.bytes[0],
1160 &grh->dgid,
1161 sizeof(qp_params->dgid));
1162 qp_params->roce_mode = ROCE_V1;
1165 memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1166 memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1167 ipv4_addr = qedr_get_ipv4_from_gid(gid_attr->gid.raw);
1168 qp_params->sgid.ipv4_addr = ipv4_addr;
1170 qedr_get_ipv4_from_gid(grh->dgid.raw);
1171 qp_params->dgid.ipv4_addr = ipv4_addr;
1172 SET_FIELD(qp_params->modify_flags,
1174 qp_params->roce_mode = ROCE_V2_IPV4;
1177 return -EINVAL;
1181 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1182 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1185 if (qp_params->vlan_id >= VLAN_CFI_MASK)
1186 qp_params->vlan_id = 0;
1195 struct qedr_device_attr *qattr = &dev->attr;
1197 /* QP0... attrs->qp_type == IB_QPT_GSI */
1198 if (attrs->qp_type != IB_QPT_RC &&
1199 attrs->qp_type != IB_QPT_GSI &&
1200 attrs->qp_type != IB_QPT_XRC_INI &&
1201 attrs->qp_type != IB_QPT_XRC_TGT) {
1204 attrs->qp_type);
1205 return -EOPNOTSUPP;
1208 if (attrs->cap.max_send_wr > qattr->max_sqe) {
1211 attrs->cap.max_send_wr, qattr->max_sqe);
1212 return -EINVAL;
1215 if (attrs->cap.max_inline_data > qattr->max_inline) {
1218 attrs->cap.max_inline_data, qattr->max_inline);
1219 return -EINVAL;
1222 if (attrs->cap.max_send_sge > qattr->max_sge) {
1225 attrs->cap.max_send_sge, qattr->max_sge);
1226 return -EINVAL;
1229 if (attrs->cap.max_recv_sge > qattr->max_sge) {
1232 attrs->cap.max_recv_sge, qattr->max_sge);
1233 return -EINVAL;
1239 if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
1240 (attrs->qp_type != IB_QPT_XRC_TGT) &&
1241 (attrs->qp_type != IB_QPT_XRC_INI)) {
1242 struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
1243 struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
1245 if ((send_cq->cq_type == QEDR_CQ_TYPE_GSI) ||
1246 (recv_cq->cq_type == QEDR_CQ_TYPE_GSI)) {
1249 return -EINVAL;
1262 uresp.srq_id = srq->srq_id;
1276 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1277 uresp->rq_db_offset =
1279 uresp->rq_db2_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_FLAGS);
1281 uresp->rq_db_offset =
1285 uresp->rq_icid = qp->icid;
1286 if (qp->urq.db_mmap_entry)
1287 uresp->rq_db_rec_addr =
1288 rdma_user_mmap_get_offset(qp->urq.db_mmap_entry);
1295 uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1298 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1299 uresp->sq_icid = qp->icid;
1301 uresp->sq_icid = qp->icid + 1;
1303 if (qp->usq.db_mmap_entry)
1304 uresp->sq_db_rec_addr =
1305 rdma_user_mmap_get_offset(qp->usq.db_mmap_entry);
1322 uresp->atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1323 uresp->qp_id = qp->qp_id;
1328 "create qp: failed a copy to user space with qp icid=0x%x.\n",
1329 qp->icid);
1336 qed_chain_reset(&qph->pbl);
1337 qph->prod = 0;
1338 qph->cons = 0;
1339 qph->wqe_cons = 0;
1340 qph->db_data.data.value = cpu_to_le16(0);
1348 spin_lock_init(&qp->q_lock);
1349 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1350 kref_init(&qp->refcnt);
1351 init_completion(&qp->iwarp_cm_comp);
1352 init_completion(&qp->qp_rel_comp);
1355 qp->pd = pd;
1356 qp->qp_type = attrs->qp_type;
1357 qp->max_inline_data = attrs->cap.max_inline_data;
1358 qp->state = QED_ROCE_QP_STATE_RESET;
1360 qp->prev_wqe_size = 0;
1362 qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
1363 qp->dev = dev;
1365 qedr_reset_qp_hwq_info(&qp->sq);
1366 qp->sq.max_sges = attrs->cap.max_send_sge;
1367 qp->sq_cq = get_qedr_cq(attrs->send_cq);
1370 qp->sq.max_sges, qp->sq_cq->icid);
1373 if (attrs->srq)
1374 qp->srq = get_qedr_srq(attrs->srq);
1377 qedr_reset_qp_hwq_info(&qp->rq);
1378 qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1379 qp->rq.max_sges = attrs->cap.max_recv_sge;
1382 qp->rq.max_sges, qp->rq_cq->icid);
1387 pd->pd_id, qp->qp_type, qp->max_inline_data,
1388 qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1391 qp->sq.max_sges, qp->sq_cq->icid);
1399 qp->sq.db = dev->db_addr +
1401 qp->sq.db_data.data.icid = qp->icid + 1;
1402 rc = qedr_db_recovery_add(dev, qp->sq.db, &qp->sq.db_data,
1409 qp->rq.db = dev->db_addr +
1411 qp->rq.db_data.data.icid = qp->icid;
1412 rc = qedr_db_recovery_add(dev, qp->rq.db, &qp->rq.db_data,
1415 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
1425 struct qedr_device_attr *qattr = &dev->attr;
1427 if (attrs->attr.max_wr > qattr->max_srq_wr) {
1430 attrs->attr.max_wr, qattr->max_srq_wr);
1431 return -EINVAL;
1434 if (attrs->attr.max_sge > qattr->max_sge) {
1437 attrs->attr.max_sge, qattr->max_sge);
1440 if (!udata && attrs->srq_type == IB_SRQT_XRC) {
1441 DP_ERR(dev, "XRC SRQs are not supported in kernel-space\n");
1442 return -EINVAL;
1450 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1451 ib_umem_release(srq->usrq.umem);
1452 ib_umem_release(srq->prod_umem);
1457 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1458 struct qedr_dev *dev = srq->dev;
1460 dev->ops->common->chain_free(dev->cdev, &hw_srq->pbl);
1462 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1463 hw_srq->virt_prod_pair_addr,
1464 hw_srq->phy_prod_pair_addr);
1475 rc = qedr_init_user_queue(udata, srq->dev, &srq->usrq, ureq->srq_addr,
1476 ureq->srq_len, false, access, 1);
1480 srq->prod_umem = ib_umem_get(srq->ibsrq.device, ureq->prod_pair_addr,
1482 if (IS_ERR(srq->prod_umem)) {
1483 qedr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
1484 ib_umem_release(srq->usrq.umem);
1485 DP_ERR(srq->dev,
1487 PTR_ERR(srq->prod_umem));
1488 return PTR_ERR(srq->prod_umem);
1491 sg = srq->prod_umem->sgt_append.sgt.sgl;
1492 srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
1501 struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
1513 va = dma_alloc_coherent(&dev->pdev->dev,
1519 return -ENOMEM;
1522 hw_srq->phy_prod_pair_addr = phy_prod_pair_addr;
1523 hw_srq->virt_prod_pair_addr = va;
1525 num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
1528 rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
1532 hw_srq->num_elems = num_elems;
1537 dma_free_coherent(&dev->pdev->dev, sizeof(struct rdma_srq_producers),
1547 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1549 struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
1561 if (init_attr->srq_type != IB_SRQT_BASIC &&
1562 init_attr->srq_type != IB_SRQT_XRC)
1563 return -EOPNOTSUPP;
1567 return -EINVAL;
1569 srq->dev = dev;
1570 srq->is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
1571 hw_srq = &srq->hw_srq;
1572 spin_lock_init(&srq->lock);
1574 hw_srq->max_wr = init_attr->attr.max_wr;
1575 hw_srq->max_sges = init_attr->attr.max_sge;
1579 udata->inlen))) {
1589 page_cnt = srq->usrq.pbl_info.num_pbes;
1590 pbl_base_addr = srq->usrq.pbl_tbl->pa;
1591 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1600 pbl = &hw_srq->pbl;
1603 phy_prod_pair_addr = hw_srq->phy_prod_pair_addr;
1607 in_params.pd_id = pd->pd_id;
1612 if (srq->is_xrc) {
1613 struct qedr_xrcd *xrcd = get_qedr_xrcd(init_attr->ext.xrc.xrcd);
1614 struct qedr_cq *cq = get_qedr_cq(init_attr->ext.cq);
1617 in_params.xrcd_id = xrcd->xrcd_id;
1618 in_params.cq_cid = cq->icid;
1621 rc = dev->ops->rdma_create_srq(dev->rdma_ctx, &in_params, &out_params);
1625 srq->srq_id = out_params.srq_id;
1633 rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
1638 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
1642 destroy_in_params.srq_id = srq->srq_id;
1644 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &destroy_in_params);
1651 return -EFAULT;
1657 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1660 xa_erase_irq(&dev->srqs, srq->srq_id);
1661 in_params.srq_id = srq->srq_id;
1662 in_params.is_xrc = srq->is_xrc;
1663 dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
1665 if (ibsrq->uobject)
1672 srq->srq_id);
1680 struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
1688 return -EINVAL;
1692 if (attr->srq_limit >= srq->hw_srq.max_wr) {
1695 attr->srq_limit, srq->hw_srq.max_wr);
1696 return -EINVAL;
1699 in_params.srq_id = srq->srq_id;
1700 in_params.wqe_limit = attr->srq_limit;
1701 rc = dev->ops->rdma_modify_srq(dev->rdma_ctx, &in_params);
1706 srq->srq_limit = attr->srq_limit;
1709 "modify srq: modified srq with srq_id=0x%0x\n", srq->srq_id);
1737 params->qp_handle_async_lo = lower_32_bits((uintptr_t) qp);
1738 params->qp_handle_async_hi = upper_32_bits((uintptr_t) qp);
1740 params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1741 params->fmr_and_reserved_lkey = fmr_and_reserved_lkey;
1742 params->qp_type = qedr_ib_to_qed_qp_type(attrs->qp_type);
1743 params->stats_queue = 0;
1746 params->pd = pd->pd_id;
1747 params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1751 params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1754 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1757 params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1758 params->srq_id = qp->srq->srq_id;
1759 params->use_srq = true;
1761 params->srq_id = 0;
1762 params->use_srq = false;
1776 qedr_qp_has_sq(qp) ? qp->usq.buf_addr : 0x0,
1777 qedr_qp_has_sq(qp) ? qp->usq.buf_len : 0,
1778 qedr_qp_has_rq(qp) ? qp->urq.buf_addr : 0x0,
1779 qedr_qp_has_sq(qp) ? qp->urq.buf_len : 0);
1787 qp->usq.pbl_tbl->va = out_params->sq_pbl_virt;
1788 qp->usq.pbl_tbl->pa = out_params->sq_pbl_phys;
1790 qedr_populate_pbls(dev, qp->usq.umem, qp->usq.pbl_tbl,
1791 &qp->usq.pbl_info, FW_PAGE_SHIFT);
1792 if (!qp->srq) {
1793 qp->urq.pbl_tbl->va = out_params->rq_pbl_virt;
1794 qp->urq.pbl_tbl->pa = out_params->rq_pbl_phys;
1797 qedr_populate_pbls(dev, qp->urq.umem, qp->urq.pbl_tbl,
1798 &qp->urq.pbl_info, FW_PAGE_SHIFT);
1806 ib_umem_release(qp->usq.umem);
1807 qp->usq.umem = NULL;
1811 ib_umem_release(qp->urq.umem);
1812 qp->urq.umem = NULL;
1815 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1816 qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1817 qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1819 kfree(qp->usq.pbl_tbl);
1820 kfree(qp->urq.pbl_tbl);
1823 if (qp->usq.db_rec_data) {
1824 qedr_db_recovery_del(dev, qp->usq.db_addr,
1825 &qp->usq.db_rec_data->db_data);
1826 rdma_user_mmap_entry_remove(qp->usq.db_mmap_entry);
1829 if (qp->urq.db_rec_data) {
1830 qedr_db_recovery_del(dev, qp->urq.db_addr,
1831 &qp->urq.db_rec_data->db_data);
1832 rdma_user_mmap_entry_remove(qp->urq.db_mmap_entry);
1835 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1836 qedr_db_recovery_del(dev, qp->urq.db_rec_db2_addr,
1837 &qp->urq.db_rec_db2_data);
1850 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1855 qp->create_type = QEDR_QP_CREATE_USER;
1859 ctx = pd->uctx;
1864 udata->inlen));
1872 /* SQ - read access only (0) */
1873 rc = qedr_init_user_queue(udata, dev, &qp->usq, ureq.sq_addr,
1880 /* RQ - read access only (0) */
1881 rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr,
1884 ib_umem_release(qp->usq.umem);
1885 qp->usq.umem = NULL;
1886 if (rdma_protocol_roce(&dev->ibdev, 1)) {
1887 qedr_free_pbl(dev, &qp->usq.pbl_info,
1888 qp->usq.pbl_tbl);
1890 kfree(qp->usq.pbl_tbl);
1901 if (qp->qp_type == IB_QPT_XRC_TGT) {
1902 struct qedr_xrcd *xrcd = get_qedr_xrcd(attrs->xrcd);
1904 in_params.xrcd_id = xrcd->xrcd_id;
1905 in_params.qp_handle_lo = qp->qp_id;
1910 in_params.sq_num_pages = qp->usq.pbl_info.num_pbes;
1911 in_params.sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1915 in_params.rq_num_pages = qp->urq.pbl_info.num_pbes;
1916 in_params.rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1920 SET_FIELD(in_params.flags, QED_ROCE_EDPM_MODE, ctx->edpm_mode);
1922 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1925 if (!qp->qed_qp) {
1926 rc = -ENOMEM;
1930 if (rdma_protocol_iwarp(&dev->ibdev, 1))
1933 qp->qp_id = out_params.qp_id;
1934 qp->icid = out_params.icid;
1944 qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
1945 qp->sq.max_wr = attrs->cap.max_send_wr;
1946 rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
1947 &qp->usq.db_rec_data->db_data,
1955 qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
1956 qp->rq.max_wr = attrs->cap.max_recv_wr;
1957 rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
1958 &qp->urq.db_rec_data->db_data,
1965 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1966 qp->urq.db_rec_db2_addr = ctx->dpi_addr + uresp.rq_db2_offset;
1971 qp->urq.db_rec_db2_data.data.icid = cpu_to_le16(qp->icid);
1972 qp->urq.db_rec_db2_data.data.value =
1975 rc = qedr_db_recovery_add(dev, qp->urq.db_rec_db2_addr,
1976 &qp->urq.db_rec_db2_data,
1985 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1998 qp->sq.db = dev->db_addr +
2000 qp->sq.db_data.data.icid = qp->icid;
2002 rc = qedr_db_recovery_add(dev, qp->sq.db,
2003 &qp->sq.db_data,
2009 qp->rq.db = dev->db_addr +
2011 qp->rq.db_data.data.icid = qp->icid;
2012 qp->rq.iwarp_db2 = dev->db_addr +
2014 qp->rq.iwarp_db2_data.data.icid = qp->icid;
2015 qp->rq.iwarp_db2_data.data.value = DQ_TCM_IWARP_POST_RQ_CF_CMD;
2017 rc = qedr_db_recovery_add(dev, qp->rq.db,
2018 &qp->rq.db_data,
2024 rc = qedr_db_recovery_add(dev, qp->rq.iwarp_db2,
2025 &qp->rq.iwarp_db2_data,
2048 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2052 in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
2053 in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
2059 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2063 in_params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
2064 in_params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
2066 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2069 if (!qp->qed_qp)
2070 return -EINVAL;
2072 qp->qp_id = out_params.qp_id;
2073 qp->icid = out_params.icid;
2091 in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
2095 in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
2100 qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
2103 if (!qp->qed_qp)
2104 return -EINVAL;
2114 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
2124 rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
2128 qp->qp_id = out_params.qp_id;
2129 qp->icid = out_params.icid;
2134 dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2141 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
2142 kfree(qp->wqe_wr_id);
2144 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
2145 kfree(qp->rqe_wr_id);
2148 if (qp->qp_type == IB_QPT_GSI)
2151 qedr_db_recovery_del(dev, qp->sq.db, &qp->sq.db_data);
2153 if (!qp->srq) {
2154 qedr_db_recovery_del(dev, qp->rq.db, &qp->rq.db_data);
2156 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2157 qedr_db_recovery_del(dev, qp->rq.iwarp_db2,
2158 &qp->rq.iwarp_db2_data);
2169 int rc = -EINVAL;
2175 qp->create_type = QEDR_QP_CREATE_KERNEL;
2187 qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,
2188 dev->attr.max_sqe);
2190 qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
2192 if (!qp->wqe_wr_id) {
2194 return -ENOMEM;
2205 qp->rq.max_wr = (u16) max_t(u32, attrs->cap.max_recv_wr, 1);
2208 qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
2210 if (!qp->rqe_wr_id) {
2213 kfree(qp->wqe_wr_id);
2214 return -ENOMEM;
2219 n_sq_entries = attrs->cap.max_send_wr;
2220 n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
2224 n_rq_elems = qp->rq.max_wr * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
2226 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2246 if (qp->qp_type != IB_QPT_GSI) {
2247 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2252 if (qp->create_type == QEDR_QP_CREATE_USER)
2264 struct ib_pd *ibpd = ibqp->pd;
2266 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2270 if (attrs->create_flags)
2271 return -EOPNOTSUPP;
2273 if (attrs->qp_type == IB_QPT_XRC_TGT)
2274 xrcd = get_qedr_xrcd(attrs->xrcd);
2287 udata ? "user library" : "kernel", attrs->event_handler, pd,
2288 get_qedr_cq(attrs->send_cq),
2289 get_qedr_cq(attrs->send_cq)->icid,
2290 get_qedr_cq(attrs->recv_cq),
2291 attrs->recv_cq ? get_qedr_cq(attrs->recv_cq)->icid : 0);
2295 if (attrs->qp_type == IB_QPT_GSI)
2306 qp->ibqp.qp_num = qp->qp_id;
2308 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2309 rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
2318 return -EFAULT;
2379 status = -EINVAL;
2390 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2391 writel(qp->rq.db_data.raw, qp->rq.db);
2398 status = -EINVAL;
2403 /* RTR->XXX */
2411 status = -EINVAL;
2416 /* RTS->XXX */
2424 status = -EINVAL;
2429 /* SQD->XXX */
2436 status = -EINVAL;
2441 /* ERR->XXX */
2444 if ((qp->rq.prod != qp->rq.cons) ||
2445 (qp->sq.prod != qp->sq.cons)) {
2447 "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
2448 qp->rq.prod, qp->rq.cons, qp->sq.prod,
2449 qp->sq.cons);
2450 status = -EINVAL;
2454 status = -EINVAL;
2459 status = -EINVAL;
2471 struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
2472 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2479 attr->qp_state);
2482 return -EOPNOTSUPP;
2484 old_qp_state = qedr_get_ibqp_state(qp->state);
2486 new_qp_state = attr->qp_state;
2490 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2492 ibqp->qp_type, attr_mask)) {
2496 attr_mask, qp->qp_id, ibqp->qp_type,
2498 rc = -EINVAL;
2507 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
2516 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
2517 rc = -EINVAL;
2525 qp->qkey = attr->qkey;
2530 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
2532 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
2534 qp_params.incoming_atomic_en = attr->qp_access_flags &
2539 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2540 return -EINVAL;
2543 if (attr->path_mtu < IB_MTU_256 ||
2544 attr->path_mtu > IB_MTU_4096) {
2546 rc = -EINVAL;
2549 qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
2551 (dev->ndev->mtu)));
2554 if (!qp->mtu) {
2555 qp->mtu =
2556 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2557 pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
2563 qp_params.traffic_class_tos = grh->traffic_class;
2564 qp_params.flow_label = grh->flow_label;
2565 qp_params.hop_limit_ttl = grh->hop_limit;
2567 qp->sgid_idx = grh->sgid_index;
2573 grh->sgid_index, rc);
2577 rc = qedr_get_dmac(dev, &attr->ah_attr,
2583 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
2594 qp_params.mtu = qp->mtu;
2600 if (qp->mtu)
2601 qp_params.mtu = qp->mtu;
2604 ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
2617 * so we get: 2^2 * 2^timeout / 2^10 = 2^(timeout - 8).
2621 if (attr->timeout)
2623 1 << max_t(int, attr->timeout - 8, 0);
2627 qp->timeout = attr->timeout;
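
The conversion above follows the comment at line 2617: the IB ack-timeout field t encodes 4.096 us * 2^t = 2^2 us * 2^t, and dividing by 2^10 approximates the microseconds-to-milliseconds conversion, giving 2^(t - 8) ms. A hedged sketch, with an illustrative helper name:

static inline u32 ib_ack_timeout_to_ms(u8 t)	/* illustrative, mirrors the max_t() above */
{
	return 1U << max_t(int, (int)t - 8, 0);	/* small t is clamped to 1 ms */
}
/* e.g. t = 14: 4.096 us * 2^14 is about 67 ms, reported as 2^(14 - 8) = 64 ms */
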
2633 qp_params.retry_cnt = attr->retry_cnt;
2639 qp_params.rnr_retry_cnt = attr->rnr_retry;
2645 qp_params.rq_psn = attr->rq_psn;
2646 qp->rq_psn = attr->rq_psn;
2650 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
2651 rc = -EINVAL;
2654 attr->max_rd_atomic,
2655 dev->attr.max_qp_req_rd_atomic_resc);
2661 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
2667 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
2673 qp_params.sq_psn = attr->sq_psn;
2674 qp->sq_psn = attr->sq_psn;
2678 if (attr->max_dest_rd_atomic >
2679 dev->attr.max_qp_resp_rd_atomic_resc) {
2682 attr->max_dest_rd_atomic,
2683 dev->attr.max_qp_resp_rd_atomic_resc);
2685 rc = -EINVAL;
2691 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
2698 qp_params.dest_qp = attr->dest_qp_num;
2699 qp->dest_qp_num = attr->dest_qp_num;
2702 cur_state = qp->state;
2709 if ((attr_mask & IB_QP_STATE) && qp->qp_type != IB_QPT_GSI &&
2711 qp->state = QED_ROCE_QP_STATE_ERR;
2713 if (qp->qp_type != IB_QPT_GSI)
2714 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
2715 qp->qed_qp, &qp_params);
2718 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
2721 qp->state = qp_params.new_state;
2732 if (params->incoming_rdma_write_en)
2734 if (params->incoming_rdma_read_en)
2736 if (params->incoming_atomic_en)
2748 struct qedr_dev *dev = qp->dev;
2755 if (qp->qp_type != IB_QPT_GSI) {
2756 rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
2759 qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2761 qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
2764 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
2765 qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2766 qp_attr->path_mig_state = IB_MIG_MIGRATED;
2767 qp_attr->rq_psn = params.rq_psn;
2768 qp_attr->sq_psn = params.sq_psn;
2769 qp_attr->dest_qp_num = params.dest_qp;
2771 qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2773 qp_attr->cap.max_send_wr = qp->sq.max_wr;
2774 qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2775 qp_attr->cap.max_send_sge = qp->sq.max_sges;
2776 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2777 qp_attr->cap.max_inline_data = dev->attr.max_inline;
2778 qp_init_attr->cap = qp_attr->cap;
2780 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2781 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
2782 params.flow_label, qp->sgid_idx,
2784 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
2785 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
2786 rdma_ah_set_sl(&qp_attr->ah_attr, 0);
2787 qp_attr->timeout = qp->timeout;
2788 qp_attr->rnr_retry = params.rnr_retry;
2789 qp_attr->retry_cnt = params.retry_cnt;
2790 qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2791 qp_attr->pkey_index = params.pkey_index;
2792 qp_attr->port_num = 1;
2793 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
2794 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
2795 qp_attr->alt_pkey_index = 0;
2796 qp_attr->alt_port_num = 0;
2797 qp_attr->alt_timeout = 0;
2798 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2800 qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2801 qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2802 qp_attr->max_rd_atomic = params.max_rd_atomic;
2803 qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2806 qp_attr->cap.max_inline_data);
2815 struct qedr_dev *dev = qp->dev;
2820 qp, qp->qp_type);
2822 if (rdma_protocol_roce(&dev->ibdev, 1)) {
2823 if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
2824 (qp->state != QED_ROCE_QP_STATE_ERR) &&
2825 (qp->state != QED_ROCE_QP_STATE_INIT)) {
2839 &qp->iwarp_cm_flags))
2840 wait_for_completion(&qp->iwarp_cm_comp);
2853 &qp->iwarp_cm_flags))
2854 wait_for_completion(&qp->iwarp_cm_comp);
2857 if (qp->qp_type == IB_QPT_GSI)
2864 if (rdma_protocol_iwarp(&dev->ibdev, 1))
2865 xa_erase(&dev->qps, qp->qp_id);
2869 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
2870 qedr_iw_qp_rem_ref(&qp->ibqp);
2871 wait_for_completion(&qp->qp_rel_comp);
2882 rdma_copy_ah_attr(&ah->attr, init_attr->ah_attr);
2891 rdma_destroy_ah_attr(&ah->attr);
2899 if (info->pbl_table)
2900 list_add_tail(&info->pbl_table->list_entry,
2901 &info->free_pbl_list);
2903 if (!list_empty(&info->inuse_pbl_list))
2904 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2906 list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2907 list_del(&pbl->list_entry);
2908 qedr_free_pbl(dev, &info->pbl_info, pbl);
2918 INIT_LIST_HEAD(&info->free_pbl_list);
2919 INIT_LIST_HEAD(&info->inuse_pbl_list);
2921 rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2926 info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2927 if (IS_ERR(info->pbl_table)) {
2928 rc = PTR_ERR(info->pbl_table);
2933 &info->pbl_table->pa);
2938 tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2944 list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2946 DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2959 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2962 int rc = -ENOMEM;
2965 return ERR_PTR(-EOPNOTSUPP);
2970 pd->pd_id, start, len, usr_addr, acc);
2973 return ERR_PTR(-EINVAL);
2979 mr->type = QEDR_MR_USER;
2981 mr->umem = ib_umem_get(ibpd->device, start, len, acc);
2982 if (IS_ERR(mr->umem)) {
2983 rc = -EFAULT;
2987 rc = init_mr_info(dev, &mr->info,
2988 ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE), 1);
2992 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2993 &mr->info.pbl_info, PAGE_SHIFT);
2995 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2997 if (rc == -EINVAL)
3006 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3007 mr->hw_mr.key = 0;
3008 mr->hw_mr.pd = pd->pd_id;
3009 mr->hw_mr.local_read = 1;
3010 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3011 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3012 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3013 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3014 mr->hw_mr.mw_bind = false;
3015 mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
3016 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3017 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3018 mr->hw_mr.page_size_log = PAGE_SHIFT;
3019 mr->hw_mr.length = len;
3020 mr->hw_mr.vaddr = usr_addr;
3021 mr->hw_mr.phy_mr = false;
3022 mr->hw_mr.dma_mr = false;
3024 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3030 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3031 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3032 mr->hw_mr.remote_atomic)
3033 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3036 mr->ibmr.lkey);
3037 return &mr->ibmr;
3040 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3042 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
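
A short note on the key arithmetic at lines 3030-3033: the hardware TID occupies the upper bits of the lkey/rkey and the low 8 bits carry the per-MR key byte, and the rkey is only populated when a remote-access flag was requested. A sketch with a hypothetical helper name:

static inline u32 qedr_make_key(u32 itid, u8 key)	/* illustrative name, not a driver function */
{
	return (itid << 8) | key;	/* TID in bits 31:8, key byte in bits 7:0 */
}
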
3051 struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
3054 rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
3058 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3060 if (mr->type != QEDR_MR_DMA)
3061 free_mr_info(dev, &mr->info);
3064 ib_umem_release(mr->umem);
3075 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3077 int rc = -ENOMEM;
3080 "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
3087 mr->dev = dev;
3088 mr->type = QEDR_MR_FRMR;
3090 rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
3094 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3096 if (rc == -EINVAL)
3105 mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
3106 mr->hw_mr.key = 0;
3107 mr->hw_mr.pd = pd->pd_id;
3108 mr->hw_mr.local_read = 1;
3109 mr->hw_mr.local_write = 0;
3110 mr->hw_mr.remote_read = 0;
3111 mr->hw_mr.remote_write = 0;
3112 mr->hw_mr.remote_atomic = 0;
3113 mr->hw_mr.mw_bind = false;
3114 mr->hw_mr.pbl_ptr = 0;
3115 mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
3116 mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
3117 mr->hw_mr.length = 0;
3118 mr->hw_mr.vaddr = 0;
3119 mr->hw_mr.phy_mr = true;
3120 mr->hw_mr.dma_mr = false;
3122 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3128 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3129 mr->ibmr.rkey = mr->ibmr.lkey;
3131 DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
3135 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3137 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
3149 return ERR_PTR(-EINVAL);
3154 return ERR_PTR(-EINVAL);
3156 return &mr->ibmr;
3166 if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
3167 DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages);
3168 return -ENOMEM;
3171 DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
3172 mr->npages, addr);
3174 pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
3175 pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
3176 pbe = (struct regpair *)pbl_table->va;
3177 pbe += mr->npages % pbes_in_page;
3178 pbe->lo = cpu_to_le32((u32)addr);
3179 pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
3181 mr->npages++;
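
The indexing in qedr_set_page above amounts to a two-level lookup: each PBL page holds pbl_size / sizeof(u64) page-buffer entries, so page n lands in table n / pbes_in_page at slot n % pbes_in_page, with the 64-bit DMA address split into little-endian lo/hi halves. A minimal sketch of the regpair fill (the helper name is an assumption):

static void set_pbe(struct regpair *pbe, u64 dma_addr)	/* illustrative */
{
	pbe->lo = cpu_to_le32(lower_32_bits(dma_addr));
	pbe->hi = cpu_to_le32(upper_32_bits(dma_addr));
}
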
3188 int work = info->completed - info->completed_handled - 1;
3191 while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
3199 pbl = list_first_entry(&info->inuse_pbl_list,
3201 list_move_tail(&pbl->list_entry, &info->free_pbl_list);
3202 info->completed_handled++;
3211 mr->npages = 0;
3213 handle_completed_mrs(mr->dev, &mr->info);
3219 struct qedr_dev *dev = get_qedr_dev(ibpd->device);
3226 return ERR_PTR(-ENOMEM);
3228 mr->type = QEDR_MR_DMA;
3230 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
3232 if (rc == -EINVAL)
3241 mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
3242 mr->hw_mr.pd = pd->pd_id;
3243 mr->hw_mr.local_read = 1;
3244 mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
3245 mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
3246 mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
3247 mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
3248 mr->hw_mr.dma_mr = true;
3250 rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
3256 mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3257 if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
3258 mr->hw_mr.remote_atomic)
3259 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
3261 DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
3262 return &mr->ibmr;
3265 dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
3273 return (((wq->prod + 1) % wq->max_wr) == wq->cons);
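
The test at line 3273 is the usual one-slot-reserved ring-buffer full check: the queue reports full when advancing prod by one would collide with cons. For example, with max_wr = 8, cons = 4 and prod = 3, (3 + 1) % 8 == 4, so no further work requests are accepted. A sketch with generic names:

static inline bool ring_is_full(u16 prod, u16 cons, u16 size)	/* illustrative */
{
	return ((prod + 1) % size) == cons;	/* one slot is always left unused */
}
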
3300 u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
3320 for (i = 0; i < wr->num_sge; i++) {
3321 u32 len = wr->sg_list[i].length;
3322 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
3329 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
3341 seg_siz -= cur;
3345 len -= cur;
3347 /* Swap fully-completed segments */
3362 DMA_REGPAIR_LE(sge->addr, vaddr); \
3363 (sge)->length = cpu_to_le32(vlength); \
3364 (sge)->flags = cpu_to_le32(vflags); \
3369 DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
3370 (hdr)->num_sges = num_sge; \
3375 DMA_REGPAIR_LE(sge->addr, vaddr); \
3376 (sge)->length = cpu_to_le32(vlength); \
3377 (sge)->l_key = cpu_to_le32(vlkey); \
3386 for (i = 0; i < wr->num_sge; i++) {
3387 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
3389 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
3390 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
3391 sge->length = cpu_to_le32(wr->sg_list[i].length);
3392 data_size += wr->sg_list[i].length;
3396 *wqe_size += wr->num_sge;
3408 rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
3409 DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
3411 if (wr->send_flags & IB_SEND_INLINE &&
3412 (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
3413 wr->opcode == IB_WR_RDMA_WRITE)) {
3417 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
3418 bad_wr, &rwqe->flags, flags);
3421 return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
3432 if (wr->send_flags & IB_SEND_INLINE) {
3436 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
3437 bad_wr, &swqe->flags, flags);
3440 return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
3447 struct qedr_mr *mr = get_qedr_mr(wr->mr);
3450 fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
3451 fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
3452 fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
3453 fwqe1->l_key = wr->key;
3455 fwqe2->access_ctrl = 0;
3457 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
3458 !!(wr->access & IB_ACCESS_REMOTE_READ));
3459 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
3460 !!(wr->access & IB_ACCESS_REMOTE_WRITE));
3461 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
3462 !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
3463 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
3464 SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
3465 !!(wr->access & IB_ACCESS_LOCAL_WRITE));
3466 fwqe2->fmr_ctrl = 0;
3468 SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
3469 ilog2(mr->ibmr.page_size) - 12);
3471 fwqe2->length_hi = 0;
3472 fwqe2->length_lo = mr->ibmr.length;
3473 fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
3474 fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
3476 qp->wqe_wr_id[qp->sq.prod].mr = mr;
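
The page-size field at line 3469 encodes log2 of the MR page size relative to 4 KB (2^12): 4 KB pages encode as 0 and 2 MB pages as ilog2(2 MB) - 12 = 21 - 12 = 9. A hedged one-liner with an illustrative name:

static inline u8 fmr_page_size_log(u32 page_size)	/* illustrative */
{
	return ilog2(page_size) - 12;	/* e.g. 4096 -> 0, 2 MB -> 9 */
}
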
3511 struct qedr_dev *dev = qp->dev;
3514 err_wr = wr->num_sge > qp->sq.max_sges;
3515 wq_is_full = qedr_wq_is_full(&qp->sq);
3516 pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
3519 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
3523 qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
3526 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
3530 qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
3534 !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
3538 qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
3548 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3566 return -ENOMEM;
3569 wqe = qed_chain_produce(&qp->sq.pbl);
3570 qp->wqe_wr_id[qp->sq.prod].signaled =
3571 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
3573 wqe->flags = 0;
3574 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
3575 !!(wr->send_flags & IB_SEND_SOLICITED));
3576 comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
3577 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
3578 SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
3579 !!(wr->send_flags & IB_SEND_FENCE));
3580 wqe->prev_wqe_size = qp->prev_wqe_size;
3582 qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
3584 switch (wr->opcode) {
3586 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3587 rc = -EINVAL;
3591 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
3593 swqe->wqe_size = 2;
3594 swqe2 = qed_chain_produce(&qp->sq.pbl);
3596 swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
3599 swqe->length = cpu_to_le32(length);
3600 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3601 qp->prev_wqe_size = swqe->wqe_size;
3602 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3605 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
3608 swqe->wqe_size = 2;
3609 swqe2 = qed_chain_produce(&qp->sq.pbl);
3612 swqe->length = cpu_to_le32(length);
3613 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3614 qp->prev_wqe_size = swqe->wqe_size;
3615 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3618 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
3620 swqe2 = qed_chain_produce(&qp->sq.pbl);
3621 swqe->wqe_size = 2;
3622 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
3625 swqe->length = cpu_to_le32(length);
3626 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
3627 qp->prev_wqe_size = swqe->wqe_size;
3628 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
3632 if (unlikely(rdma_protocol_iwarp(&dev->ibdev, 1))) {
3633 rc = -EINVAL;
3637 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
3640 rwqe->wqe_size = 2;
3641 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
3642 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3645 rwqe->length = cpu_to_le32(length);
3646 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3647 qp->prev_wqe_size = rwqe->wqe_size;
3648 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3651 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
3654 rwqe->wqe_size = 2;
3655 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3658 rwqe->length = cpu_to_le32(length);
3659 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3660 qp->prev_wqe_size = rwqe->wqe_size;
3661 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3664 SET_FIELD2(wqe->flags, RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG, 1);
3668 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
3671 rwqe->wqe_size = 2;
3672 rwqe2 = qed_chain_produce(&qp->sq.pbl);
3675 rwqe->length = cpu_to_le32(length);
3676 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
3677 qp->prev_wqe_size = rwqe->wqe_size;
3678 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
3684 awqe1->wqe_size = 4;
3686 awqe2 = qed_chain_produce(&qp->sq.pbl);
3687 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
3688 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
3690 awqe3 = qed_chain_produce(&qp->sq.pbl);
3692 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
3693 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
3694 DMA_REGPAIR_LE(awqe3->swap_data,
3695 atomic_wr(wr)->compare_add);
3697 wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
3698 DMA_REGPAIR_LE(awqe3->swap_data,
3699 atomic_wr(wr)->swap);
3700 DMA_REGPAIR_LE(awqe3->cmp_data,
3701 atomic_wr(wr)->compare_add);
3706 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
3707 qp->prev_wqe_size = awqe1->wqe_size;
3712 iwqe->wqe_size = 1;
3714 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
3715 iwqe->inv_l_key = wr->ex.invalidate_rkey;
3716 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
3717 qp->prev_wqe_size = iwqe->wqe_size;
3721 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
3723 fwqe1->wqe_size = 2;
3732 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
3733 qp->prev_wqe_size = fwqe1->wqe_size;
3736 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
3737 rc = -EINVAL;
3748 value = le16_to_cpu(qp->sq.db_data.data.value);
3749 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
3752 qp->prev_wqe_size = wqe->prev_wqe_size;
3753 rc = -EINVAL;
3763 struct qedr_dev *dev = get_qedr_dev(ibqp->device);
3770 if (qp->qp_type == IB_QPT_GSI)
3773 spin_lock_irqsave(&qp->q_lock, flags);
3775 if (rdma_protocol_roce(&dev->ibdev, 1)) {
3776 if ((qp->state != QED_ROCE_QP_STATE_RTS) &&
3777 (qp->state != QED_ROCE_QP_STATE_ERR) &&
3778 (qp->state != QED_ROCE_QP_STATE_SQD)) {
3779 spin_unlock_irqrestore(&qp->q_lock, flags);
3782 "QP in wrong state! QP icid=0x%x state %d\n",
3783 qp->icid, qp->state);
3784 return -EINVAL;
3793 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
3795 qedr_inc_sw_prod(&qp->sq);
3797 qp->sq.db_data.data.value++;
3799 wr = wr->next;
3808 * qp->wqe_wr_id is accessed during qedr_poll_cq, as
3816 writel(qp->sq.db_data.raw, qp->sq.db);
3818 spin_unlock_irqrestore(&qp->q_lock, flags);
3831 used = hw_srq->wr_prod_cnt - (u32)atomic_read(&hw_srq->wr_cons_cnt);
3833 return hw_srq->max_wr - used;
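
The SRQ accounting above uses free-running producer/consumer counters, so the unsigned subtraction stays correct across 32-bit wraparound and the free-slot count is simply max_wr minus the outstanding WRs. A generic sketch:

static inline u32 srq_free_slots(u32 prod_cnt, u32 cons_cnt, u32 max_wr)	/* illustrative */
{
	u32 used = prod_cnt - cons_cnt;	/* wrap-safe for free-running u32 counters */

	return max_wr - used;
}
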
3841 struct qedr_dev *dev = srq->dev;
3847 spin_lock_irqsave(&srq->lock, flags);
3849 hw_srq = &srq->hw_srq;
3850 pbl = &srq->hw_srq.pbl;
3856 wr->num_sge > srq->hw_srq.max_sges) {
3858 hw_srq->wr_prod_cnt,
3859 atomic_read(&hw_srq->wr_cons_cnt),
3860 wr->num_sge, srq->hw_srq.max_sges);
3861 status = -ENOMEM;
3867 num_sge = wr->num_sge;
3869 SRQ_HDR_SET(hdr, wr->wr_id, num_sge);
3871 srq->hw_srq.wr_prod_cnt++;
3872 hw_srq->wqe_prod++;
3873 hw_srq->sge_prod++;
3877 wr->num_sge, hw_srq->wqe_prod, wr->wr_id);
3879 for (i = 0; i < wr->num_sge; i++) {
3883 SRQ_SGE_SET(srq_sge, wr->sg_list[i].addr,
3884 wr->sg_list[i].length, wr->sg_list[i].lkey);
3888 i, srq_sge->length, srq_sge->l_key,
3889 srq_sge->addr.hi, srq_sge->addr.lo);
3890 hw_srq->sge_prod++;
3902 srq->hw_srq.virt_prod_pair_addr->sge_prod = cpu_to_le32(hw_srq->sge_prod);
3905 srq->hw_srq.virt_prod_pair_addr->wqe_prod = cpu_to_le32(hw_srq->wqe_prod);
3907 wr = wr->next;
3910 DP_DEBUG(dev, QEDR_MSG_SRQ, "POST: Elements in S-RQ: %d\n",
3912 spin_unlock_irqrestore(&srq->lock, flags);
3921 struct qedr_dev *dev = qp->dev;
3925 if (qp->qp_type == IB_QPT_GSI)
3928 spin_lock_irqsave(&qp->q_lock, flags);
3933 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3935 wr->num_sge > qp->rq.max_sges) {
3937 qed_chain_get_elem_left_u32(&qp->rq.pbl),
3938 QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3939 qp->rq.max_sges);
3940 status = -ENOMEM;
3944 for (i = 0; i < wr->num_sge; i++) {
3947 qed_chain_produce(&qp->rq.pbl);
3954 wr->num_sge);
3957 wr->sg_list[i].lkey);
3959 RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3960 wr->sg_list[i].length, flags);
3963 /* Special case of no sges. FW requires between 1-4 sges...
3967 if (!wr->num_sge) {
3970 qed_chain_produce(&qp->rq.pbl);
3982 qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
3983 qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
3985 qedr_inc_sw_prod(&qp->rq);
3987 /* qp->rqe_wr_id is accessed during qedr_poll_cq, as
3996 qp->rq.db_data.data.value++;
3998 writel(qp->rq.db_data.raw, qp->rq.db);
4000 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
4001 writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
4004 wr = wr->next;
4007 spin_unlock_irqrestore(&qp->q_lock, flags);
4014 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4016 return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
4017 cq->pbl_toggle;
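
Together with lines 726-727 and 1022-1023, this is a toggle-bit ownership scheme: software tracks the toggle value it expects for the current pass over the CQE ring and flips it whenever the last element is reached, so a CQE belongs to software only while its toggle bit matches. A minimal sketch with generic names:

static bool cqe_owned_by_sw(u8 cqe_flags, u8 expected_toggle, u8 toggle_mask)	/* illustrative */
{
	return (cqe_flags & toggle_mask) == expected_toggle;
}
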
4022 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4025 qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
4026 resp_cqe->qp_handle.lo,
4033 struct rdma_cqe_requester *resp_cqe = &cqe->req;
4035 return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
4041 return cq->latest_cqe;
4053 if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
4054 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4064 while (num_entries && qp->sq.wqe_cons != hw_cons) {
4065 if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
4072 wc->status = status;
4073 wc->vendor_err = 0;
4074 wc->wc_flags = 0;
4075 wc->src_qp = qp->id;
4076 wc->qp = &qp->ibqp;
4078 wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
4079 wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
4081 switch (wc->opcode) {
4083 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4087 wc->byte_len = 8;
4090 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
4094 wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
4100 num_entries--;
4104 while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
4105 qed_chain_consume(&qp->sq.pbl);
4106 qedr_inc_sw_cons(&qp->sq);
4119 switch (req->status) {
4121 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4125 if (qp->state != QED_ROCE_QP_STATE_ERR)
4127 "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4128 cq->icid, qp->icid);
4129 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
4134 qp->state = QED_ROCE_QP_STATE_ERR;
4136 req->sq_cons - 1, IB_WC_SUCCESS, 0);
4142 switch (req->status) {
4145 "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4146 cq->icid, qp->icid);
4151 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4152 cq->icid, qp->icid);
4157 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4158 cq->icid, qp->icid);
4163 "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4164 cq->icid, qp->icid);
4169 "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4170 cq->icid, qp->icid);
4175 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4176 cq->icid, qp->icid);
4181 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4182 cq->icid, qp->icid);
4187 "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4188 cq->icid, qp->icid);
4193 "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4194 cq->icid, qp->icid);
4199 "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4200 cq->icid, qp->icid);
4205 "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
4206 cq->icid, qp->icid);
4209 cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
4242 wc->status = IB_WC_SUCCESS;
4243 wc->byte_len = le32_to_cpu(resp->length);
4245 if (resp->flags & QEDR_RESP_IMM) {
4246 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
4247 wc->wc_flags |= IB_WC_WITH_IMM;
4249 if (resp->flags & QEDR_RESP_RDMA)
4250 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4252 if (resp->flags & QEDR_RESP_INV)
4253 return -EINVAL;
4255 } else if (resp->flags & QEDR_RESP_INV) {
4256 wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
4257 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4259 if (resp->flags & QEDR_RESP_RDMA)
4260 return -EINVAL;
4262 } else if (resp->flags & QEDR_RESP_RDMA) {
4263 return -EINVAL;
4274 wc->opcode = IB_WC_RECV;
4275 wc->wc_flags = 0;
4277 if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
4280 "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
4281 cq, cq->icid, resp->flags);
4284 wc->status = qedr_cqe_resp_status_to_ib(resp->status);
4285 if (wc->status == IB_WC_GENERAL_ERR)
4287 "CQ %p (icid=%d) contains an invalid CQE status %d\n",
4288 cq, cq->icid, resp->status);
4292 wc->vendor_err = 0;
4293 wc->src_qp = qp->id;
4294 wc->qp = &qp->ibqp;
4295 wc->wr_id = wr_id;
4302 struct qedr_srq *srq = qp->srq;
4305 wr_id = HILO_GEN(le32_to_cpu(resp->srq_wr_id.hi),
4306 le32_to_cpu(resp->srq_wr_id.lo), u64);
4308 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4309 wc->status = IB_WC_WR_FLUSH_ERR;
4310 wc->vendor_err = 0;
4311 wc->wr_id = wr_id;
4312 wc->byte_len = 0;
4313 wc->src_qp = qp->id;
4314 wc->qp = &qp->ibqp;
4315 wc->wr_id = wr_id;
4319 atomic_inc(&srq->hw_srq.wr_cons_cnt);
4327 u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4331 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4332 qed_chain_consume(&qp->rq.pbl);
4333 qedr_inc_sw_cons(&qp->rq);
4343 while (num_entries && qp->rq.wqe_cons != hw_cons) {
4345 wc->status = IB_WC_WR_FLUSH_ERR;
4346 wc->vendor_err = 0;
4347 wc->wc_flags = 0;
4348 wc->src_qp = qp->id;
4349 wc->byte_len = 0;
4350 wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
4351 wc->qp = &qp->ibqp;
4352 num_entries--;
4355 while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
4356 qed_chain_consume(&qp->rq.pbl);
4357 qedr_inc_sw_cons(&qp->rq);
4366 if (le16_to_cpu(resp->rq_cons_or_srq_id) == qp->rq.wqe_cons) {
4392 if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
4394 resp->rq_cons_or_srq_id);
4408 if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
4416 struct qedr_dev *dev = get_qedr_dev(ibcq->device);
4424 if (cq->destroyed) {
4426 "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
4427 cq, cq->icid);
4431 if (cq->cq_type == QEDR_CQ_TYPE_GSI)
4434 spin_lock_irqsave(&cq->cq_lock, flags);
4435 cqe = cq->latest_cqe;
4436 old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4450 wc->qp = &qp->ibqp;
4455 &cqe->req);
4456 try_consume_req_cqe(cq, qp, &cqe->req, &update);
4460 &cqe->resp, &update);
4464 wc, &cqe->resp);
4472 num_entries -= cnt;
4478 new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
4480 cq->cq_cons += new_cons - old_cons;
4486 doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
4488 spin_unlock_irqrestore(&cq->cq_lock, flags);