Lines Matching full:rf

105  * @rf: RDMA PCI function
109 irdma_puda_ce_handler(struct irdma_pci_f *rf, in irdma_puda_ce_handler() argument
112 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_puda_ce_handler()
136 * @rf: RDMA PCI function
140 irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) in irdma_process_ceq() argument
142 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_ceq()
162 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); in irdma_process_ceq()
165 irdma_puda_ce_handler(rf, cq); in irdma_process_ceq()
205 * @rf: RDMA PCI function
208 irdma_process_aeq(struct irdma_pci_f *rf) in irdma_process_aeq() argument
210 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_aeq()
211 struct irdma_aeq *aeq = &rf->aeq; in irdma_process_aeq()
219 struct irdma_device *iwdev = rf->iwdev; in irdma_process_aeq()
235 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ, in irdma_process_aeq()
242 spin_lock_irqsave(&rf->qptable_lock, flags); in irdma_process_aeq()
243 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
245 spin_unlock_irqrestore(&rf->qptable_lock, in irdma_process_aeq()
248 struct irdma_device *iwdev = rf->iwdev; in irdma_process_aeq()
257 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ, in irdma_process_aeq()
263 spin_unlock_irqrestore(&rf->qptable_lock, flags); in irdma_process_aeq()
344 spin_lock_irqsave(&rf->cqtable_lock, flags); in irdma_process_aeq()
345 iwcq = rf->cq_table[info->qp_cq_id]; in irdma_process_aeq()
347 spin_unlock_irqrestore(&rf->cqtable_lock, in irdma_process_aeq()
349 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_AEQ, in irdma_process_aeq()
355 spin_unlock_irqrestore(&rf->cqtable_lock, flags); in irdma_process_aeq()
458 struct irdma_pci_f *rf = from_tasklet(rf, (struct tasklet_struct *)t, in irdma_dpc() local
461 if (rf->msix_shared) in irdma_dpc()
462 irdma_process_ceq(rf, rf->ceqlist); in irdma_dpc()
463 irdma_process_aeq(rf); in irdma_dpc()
464 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); in irdma_dpc()
476 struct irdma_pci_f *rf = iwceq->rf; in irdma_ceq_dpc() local
478 irdma_process_ceq(rf, iwceq); in irdma_ceq_dpc()
479 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); in irdma_ceq_dpc()
484 * @rf: RDMA PCI function
490 irdma_save_msix_info(struct irdma_pci_f *rf) in irdma_save_msix_info() argument
498 if (!rf->msix_count) { in irdma_save_msix_info()
499 irdma_dev_err(to_ibdev(&rf->sc_dev), "No MSI-X vectors reserved for RDMA.\n"); in irdma_save_msix_info()
503 size = sizeof(struct irdma_msix_vector) * rf->msix_count; in irdma_save_msix_info()
505 size += sizeof(*iw_qvinfo) * rf->msix_count - 1; in irdma_save_msix_info()
506 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); in irdma_save_msix_info()
507 if (!rf->iw_msixtbl) in irdma_save_msix_info()
510 rf->iw_qvlist = (struct irdma_qvlist_info *) in irdma_save_msix_info()
511 (&rf->iw_msixtbl[rf->msix_count]); in irdma_save_msix_info()
512 iw_qvlist = rf->iw_qvlist; in irdma_save_msix_info()
514 iw_qvlist->num_vectors = rf->msix_count; in irdma_save_msix_info()
515 if (rf->msix_count <= num_online_cpus()) in irdma_save_msix_info()
516 rf->msix_shared = true; in irdma_save_msix_info()
517 else if (rf->msix_count > num_online_cpus() + 1) in irdma_save_msix_info()
518 rf->msix_count = num_online_cpus() + 1; in irdma_save_msix_info()
520 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { in irdma_save_msix_info()
521 rf->iw_msixtbl[i].idx = rf->msix_info.entry + i; in irdma_save_msix_info()
522 rf->iw_msixtbl[i].cpu_affinity = ceq_idx; in irdma_save_msix_info()
525 if (rf->msix_shared) in irdma_save_msix_info()
534 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; in irdma_save_msix_info()
547 struct irdma_pci_f *rf = data; in irdma_irq_handler() local
549 tasklet_schedule(&rf->dpc_tasklet); in irdma_irq_handler()
566 * @rf: RDMA PCI function
573 irdma_free_irq(struct irdma_pci_f *rf, struct irdma_msix_vector *msix_vec) in irdma_free_irq() argument
576 bus_teardown_intr(rf->dev_ctx.dev, msix_vec->res, in irdma_free_irq()
581 bus_release_resource(rf->dev_ctx.dev, SYS_RES_IRQ, in irdma_free_irq()
590 * @rf: RDMA PCI function
597 irdma_destroy_irq(struct irdma_pci_f *rf, in irdma_destroy_irq() argument
600 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_irq()
603 irdma_free_irq(rf, msix_vec); in irdma_destroy_irq()
604 if (rf == dev_id) { in irdma_destroy_irq()
605 tasklet_kill(&rf->dpc_tasklet); in irdma_destroy_irq()
615 * @rf: RDMA PCI function
622 irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp) in irdma_destroy_cqp() argument
624 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_cqp()
625 struct irdma_cqp *cqp = &rf->cqp; in irdma_destroy_cqp()
628 if (rf->cqp_cmpl_wq) in irdma_destroy_cqp()
629 destroy_workqueue(rf->cqp_cmpl_wq); in irdma_destroy_cqp()
634 irdma_cleanup_pending_cqp_op(rf); in irdma_destroy_cqp()
643 irdma_destroy_virt_aeq(struct irdma_pci_f *rf) in irdma_destroy_virt_aeq() argument
645 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_virt_aeq()
649 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); in irdma_destroy_virt_aeq()
650 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_destroy_virt_aeq()
656 * @rf: RDMA PCI function
663 irdma_destroy_aeq(struct irdma_pci_f *rf) in irdma_destroy_aeq() argument
665 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_aeq()
666 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_aeq()
669 if (!rf->msix_shared) { in irdma_destroy_aeq()
670 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); in irdma_destroy_aeq()
671 irdma_destroy_irq(rf, rf->iw_msixtbl, rf); in irdma_destroy_aeq()
673 if (rf->reset) in irdma_destroy_aeq()
683 irdma_destroy_virt_aeq(rf); in irdma_destroy_aeq()
690 * @rf: RDMA PCI function
697 irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) in irdma_destroy_ceq() argument
699 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ceq()
702 if (rf->reset) in irdma_destroy_ceq()
724 * @rf: RDMA PCI function
729 irdma_del_ceq_0(struct irdma_pci_f *rf) in irdma_del_ceq_0() argument
731 struct irdma_ceq *iwceq = rf->ceqlist; in irdma_del_ceq_0()
734 if (rf->msix_shared) { in irdma_del_ceq_0()
735 msix_vec = &rf->iw_msixtbl[0]; in irdma_del_ceq_0()
736 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, in irdma_del_ceq_0()
739 irdma_destroy_irq(rf, msix_vec, rf); in irdma_del_ceq_0()
741 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceq_0()
742 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceq_0()
745 irdma_destroy_ceq(rf, iwceq); in irdma_del_ceq_0()
746 rf->sc_dev.ceq_valid = false; in irdma_del_ceq_0()
747 rf->ceqs_count = 0; in irdma_del_ceq_0()
752 * @rf: RDMA PCI function
758 irdma_del_ceqs(struct irdma_pci_f *rf) in irdma_del_ceqs() argument
760 struct irdma_ceq *iwceq = &rf->ceqlist[1]; in irdma_del_ceqs()
764 if (rf->msix_shared) in irdma_del_ceqs()
765 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceqs()
767 msix_vec = &rf->iw_msixtbl[2]; in irdma_del_ceqs()
769 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { in irdma_del_ceqs()
770 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, in irdma_del_ceqs()
772 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceqs()
773 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_del_ceqs()
778 irdma_free_dma_mem(rf->sc_dev.hw, &iwceq->mem); in irdma_del_ceqs()
780 rf->ceqs_count = 1; in irdma_del_ceqs()
785 * @rf: RDMA PCI function
791 irdma_destroy_ccq(struct irdma_pci_f *rf) in irdma_destroy_ccq() argument
793 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ccq()
794 struct irdma_ccq *ccq = &rf->ccq; in irdma_destroy_ccq()
797 if (!rf->reset) in irdma_destroy_ccq()
867 * @rf: RDMA PCI function
875 irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, in irdma_create_hmc_objs() argument
878 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_hmc_objs()
884 info.entry_type = rf->sd_type; in irdma_create_hmc_objs()
923 * @rf: RDMA PCI function
933 irdma_obj_aligned_mem(struct irdma_pci_f *rf, in irdma_obj_aligned_mem() argument
940 va = (unsigned long)rf->obj_next.va; in irdma_obj_aligned_mem()
946 memptr->pa = rf->obj_next.pa + extra; in irdma_obj_aligned_mem()
948 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) in irdma_obj_aligned_mem()
951 rf->obj_next.va = (u8 *)memptr->va + size; in irdma_obj_aligned_mem()
952 rf->obj_next.pa = memptr->pa + size; in irdma_obj_aligned_mem()
959 * @rf: RDMA PCI function
965 irdma_create_cqp(struct irdma_pci_f *rf) in irdma_create_cqp() argument
969 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_cqp()
971 struct irdma_cqp *cqp = &rf->cqp; in irdma_create_cqp()
997 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), in irdma_create_cqp()
1011 cqp_init_info.hmc_profile = rf->rsrc_profile; in irdma_create_cqp()
1013 cqp_init_info.protocol_used = rf->protocol_used; in irdma_create_cqp()
1014 cqp_init_info.en_rem_endpoint_trk = rf->en_rem_endpoint_trk; in irdma_create_cqp()
1015 memcpy(&cqp_init_info.dcqcn_params, &rf->dcqcn_params, in irdma_create_cqp()
1018 switch (rf->rdma_ver) { in irdma_create_cqp()
1068 * @rf: RDMA PCI function
1074 irdma_create_ccq(struct irdma_pci_f *rf) in irdma_create_ccq() argument
1076 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ccq()
1078 struct irdma_ccq *ccq = &rf->ccq; in irdma_create_ccq()
1092 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, in irdma_create_ccq()
1108 info.vsi = &rf->default_vsi; in irdma_create_ccq()
1131 status = irdma_alloc_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1134 status = irdma_add_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1138 irdma_del_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1146 * @rf: RDMA PCI function
1157 irdma_irq_request(struct irdma_pci_f *rf, in irdma_irq_request() argument
1161 device_t dev = rf->dev_ctx.dev; in irdma_irq_request()
1167 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, in irdma_irq_request()
1174 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, in irdma_irq_request()
1191 * @rf: RDMA PCI function
1200 irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_cfg_ceq_vector() argument
1205 if (rf->msix_shared && !ceq_id) { in irdma_cfg_ceq_vector()
1207 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev)); in irdma_cfg_ceq_vector()
1208 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_ceq_vector()
1209 status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf); in irdma_cfg_ceq_vector()
1212 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name); in irdma_cfg_ceq_vector()
1216 dev_name(&rf->pcidev->dev), ceq_id); in irdma_cfg_ceq_vector()
1219 status = irdma_irq_request(rf, msix_vec, irdma_ceq_handler, iwceq); in irdma_cfg_ceq_vector()
1222 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name); in irdma_cfg_ceq_vector()
1225 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); in irdma_cfg_ceq_vector()
1232 * @rf: RDMA PCI function
1238 irdma_cfg_aeq_vector(struct irdma_pci_f *rf) in irdma_cfg_aeq_vector() argument
1240 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; in irdma_cfg_aeq_vector()
1243 if (!rf->msix_shared) { in irdma_cfg_aeq_vector()
1245 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev)); in irdma_cfg_aeq_vector()
1246 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_aeq_vector()
1247 status = irdma_irq_request(rf, msix_vec, irdma_irq_handler, rf); in irdma_cfg_aeq_vector()
1250 bus_describe_intr(rf->dev_ctx.dev, msix_vec->res, msix_vec->tag, "%s", msix_vec->name); in irdma_cfg_aeq_vector()
1254 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, "aeq irq config fail\n"); in irdma_cfg_aeq_vector()
1258 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); in irdma_cfg_aeq_vector()
1265 * @rf: RDMA PCI function
1274 irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_create_ceq() argument
1279 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ceq()
1284 iwceq->rf = rf; in irdma_create_ceq()
1285 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, in irdma_create_ceq()
1303 scratch = (uintptr_t)&rf->cqp.sc_cqp; in irdma_create_ceq()
1307 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_create_ceq()
1323 * @rf: RDMA PCI function
1330 irdma_setup_ceq_0(struct irdma_pci_f *rf) in irdma_setup_ceq_0() argument
1338 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceq_0()
1339 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); in irdma_setup_ceq_0()
1340 memset(rf->ceqlist, 0, num_ceqs * sizeof(*rf->ceqlist)); in irdma_setup_ceq_0()
1341 if (!rf->ceqlist) { in irdma_setup_ceq_0()
1346 iwceq = &rf->ceqlist[0]; in irdma_setup_ceq_0()
1347 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi); in irdma_setup_ceq_0()
1349 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, in irdma_setup_ceq_0()
1355 i = rf->msix_shared ? 0 : 1; in irdma_setup_ceq_0()
1356 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceq_0()
1359 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); in irdma_setup_ceq_0()
1361 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceq_0()
1365 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceq_0()
1366 rf->ceqs_count++; in irdma_setup_ceq_0()
1369 if (status && !rf->ceqs_count) { in irdma_setup_ceq_0()
1370 kfree(rf->ceqlist); in irdma_setup_ceq_0()
1371 rf->ceqlist = NULL; in irdma_setup_ceq_0()
1374 rf->sc_dev.ceq_valid = true; in irdma_setup_ceq_0()
1381 * @rf: RDMA PCI function
1389 irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi) in irdma_setup_ceqs() argument
1398 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceqs()
1399 i = (rf->msix_shared) ? 1 : 2; in irdma_setup_ceqs()
1401 iwceq = &rf->ceqlist[ceq_id]; in irdma_setup_ceqs()
1402 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); in irdma_setup_ceqs()
1404 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_ERR, in irdma_setup_ceqs()
1409 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceqs()
1412 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); in irdma_setup_ceqs()
1414 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceqs()
1417 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceqs()
1418 rf->ceqs_count++; in irdma_setup_ceqs()
1424 irdma_del_ceqs(rf); in irdma_setup_ceqs()
1430 irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) in irdma_create_virt_aeq() argument
1432 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_virt_aeq()
1437 if (rf->rdma_ver < IRDMA_GEN_2) in irdma_create_virt_aeq()
1447 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); in irdma_create_virt_aeq()
1454 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); in irdma_create_virt_aeq()
1456 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_create_virt_aeq()
1466 * @rf: RDMA PCI function
1472 irdma_create_aeq(struct irdma_pci_f *rf) in irdma_create_aeq() argument
1475 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_aeq()
1476 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_aeq()
1477 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; in irdma_create_aeq()
1479 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; in irdma_create_aeq()
1493 status = irdma_create_virt_aeq(rf, aeq_size); in irdma_create_aeq()
1507 info.msix_idx = rf->iw_msixtbl->idx; in irdma_create_aeq()
1520 irdma_destroy_virt_aeq(rf); in irdma_create_aeq()
1529 * @rf: RDMA PCI function
1535 irdma_setup_aeq(struct irdma_pci_f *rf) in irdma_setup_aeq() argument
1537 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_setup_aeq()
1540 status = irdma_create_aeq(rf); in irdma_setup_aeq()
1544 status = irdma_cfg_aeq_vector(rf); in irdma_setup_aeq()
1546 irdma_destroy_aeq(rf); in irdma_setup_aeq()
1550 if (!rf->msix_shared) in irdma_setup_aeq()
1551 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); in irdma_setup_aeq()
1574 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ilq()
1582 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ilq create fail\n"); in irdma_initialize_ilq()
1605 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ieq()
1611 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_ERR, "ieq create fail\n"); in irdma_initialize_ieq()
1624 struct irdma_pci_f *rf = iwdev->rf; in irdma_reinitialize_ieq() local
1628 iwdev->rf->reset = true; in irdma_reinitialize_ieq()
1629 rf->gen_ops.request_reset(rf); in irdma_reinitialize_ieq()
1635 * @rf: RDMA PCI function
1642 irdma_hmc_setup(struct irdma_pci_f *rf) in irdma_hmc_setup() argument
1644 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_hmc_setup()
1648 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; in irdma_hmc_setup()
1650 rf->sd_type = IRDMA_SD_TYPE_DIRECT; in irdma_hmc_setup()
1655 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); in irdma_hmc_setup()
1662 * @rf: RDMA PCI function
1665 irdma_del_init_mem(struct irdma_pci_f *rf) in irdma_del_init_mem() argument
1667 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_del_init_mem()
1671 vfree(rf->mem_rsrc); in irdma_del_init_mem()
1672 rf->mem_rsrc = NULL; in irdma_del_init_mem()
1673 irdma_free_dma_mem(&rf->hw, &rf->obj_mem); in irdma_del_init_mem()
1674 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_del_init_mem()
1675 kfree(rf->allocated_ws_nodes); in irdma_del_init_mem()
1676 rf->allocated_ws_nodes = NULL; in irdma_del_init_mem()
1679 kfree(rf->ceqlist); in irdma_del_init_mem()
1680 rf->ceqlist = NULL; in irdma_del_init_mem()
1681 kfree(rf->iw_msixtbl); in irdma_del_init_mem()
1682 rf->iw_msixtbl = NULL; in irdma_del_init_mem()
1683 kfree(rf->hmc_info_mem); in irdma_del_init_mem()
1684 rf->hmc_info_mem = NULL; in irdma_del_init_mem()
1688 * @rf: RDMA PCI function
1695 irdma_initialize_dev(struct irdma_pci_f *rf) in irdma_initialize_dev() argument
1698 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_initialize_dev()
1707 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); in irdma_initialize_dev()
1708 if (!rf->hmc_info_mem) in irdma_initialize_dev()
1711 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; in irdma_initialize_dev()
1712 dev->hmc_info = &rf->hw.hmc; in irdma_initialize_dev()
1714 (rf->pble_rsrc + 1); in irdma_initialize_dev()
1716 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, in irdma_initialize_dev()
1724 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, in irdma_initialize_dev()
1732 info.bar0 = rf->hw.hw_addr; in irdma_initialize_dev()
1733 info.hmc_fn_id = rf->peer_info->pf_id; in irdma_initialize_dev()
1737 info.debug_mask = rf->sc_dev.debug_mask; in irdma_initialize_dev()
1738 info.hw = &rf->hw; in irdma_initialize_dev()
1739 status = irdma_sc_dev_init(&rf->sc_dev, &info); in irdma_initialize_dev()
1745 kfree(rf->hmc_info_mem); in irdma_initialize_dev()
1746 rf->hmc_info_mem = NULL; in irdma_initialize_dev()
1763 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state); in irdma_rt_deinit_hw()
1767 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_deinit_hw()
1768 irdma_del_local_mac_entry(iwdev->rf, in irdma_rt_deinit_hw()
1775 if (iwdev->rf->en_rem_endpoint_trk) { in irdma_rt_deinit_hw()
1776 qp.dev = &iwdev->rf->sc_dev; in irdma_rt_deinit_hw()
1785 iwdev->rf->reset); in irdma_rt_deinit_hw()
1791 iwdev->rf->reset); in irdma_rt_deinit_hw()
1808 irdma_setup_init_state(struct irdma_pci_f *rf) in irdma_setup_init_state() argument
1812 status = irdma_save_msix_info(rf); in irdma_setup_init_state()
1816 rf->obj_mem.size = 8192; in irdma_setup_init_state()
1817 rf->obj_mem.va = irdma_allocate_dma_mem(&rf->hw, &rf->obj_mem, in irdma_setup_init_state()
1818 rf->obj_mem.size, in irdma_setup_init_state()
1820 if (!rf->obj_mem.va) { in irdma_setup_init_state()
1825 rf->obj_next = rf->obj_mem; in irdma_setup_init_state()
1826 status = irdma_initialize_dev(rf); in irdma_setup_init_state()
1833 irdma_free_dma_mem(&rf->hw, &rf->obj_mem); in irdma_setup_init_state()
1835 kfree(rf->iw_msixtbl); in irdma_setup_init_state()
1836 rf->iw_msixtbl = NULL; in irdma_setup_init_state()
1849 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, in irdma_get_used_rsrc()
1850 iwdev->rf->max_pd); in irdma_get_used_rsrc()
1851 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, in irdma_get_used_rsrc()
1852 iwdev->rf->max_qp); in irdma_get_used_rsrc()
1853 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, in irdma_get_used_rsrc()
1854 iwdev->rf->max_cq); in irdma_get_used_rsrc()
1855 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, in irdma_get_used_rsrc()
1856 iwdev->rf->max_mr); in irdma_get_used_rsrc()
1860 irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) in irdma_ctrl_deinit_hw() argument
1862 enum init_completion_state state = rf->init_state; in irdma_ctrl_deinit_hw()
1864 rf->init_state = INVALID_STATE; in irdma_ctrl_deinit_hw()
1865 if (rf->rsrc_created) { in irdma_ctrl_deinit_hw()
1866 irdma_destroy_aeq(rf); in irdma_ctrl_deinit_hw()
1867 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_ctrl_deinit_hw()
1868 irdma_del_ceqs(rf); in irdma_ctrl_deinit_hw()
1869 rf->rsrc_created = false; in irdma_ctrl_deinit_hw()
1874 irdma_del_ceq_0(rf); in irdma_ctrl_deinit_hw()
1877 irdma_destroy_ccq(rf); in irdma_ctrl_deinit_hw()
1881 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, in irdma_ctrl_deinit_hw()
1882 rf->reset, rf->rdma_ver); in irdma_ctrl_deinit_hw()
1885 irdma_destroy_cqp(rf, !rf->reset); in irdma_ctrl_deinit_hw()
1888 irdma_del_init_mem(rf); in irdma_ctrl_deinit_hw()
1892 irdma_dev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); in irdma_ctrl_deinit_hw()
1909 struct irdma_pci_f *rf = iwdev->rf; in irdma_rt_init_hw() local
1910 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_rt_init_hw()
1920 vsi_info.register_qset = rf->gen_ops.register_qset; in irdma_rt_init_hw()
1921 vsi_info.unregister_qset = rf->gen_ops.unregister_qset; in irdma_rt_init_hw()
1925 status = irdma_setup_cm_core(iwdev, rf->rdma_ver); in irdma_rt_init_hw()
1953 if (iwdev->rf->en_rem_endpoint_trk) { in irdma_rt_init_hw()
1962 if (!rf->rsrc_created) { in irdma_rt_init_hw()
1963 status = irdma_setup_ceqs(rf, &iwdev->vsi); in irdma_rt_init_hw()
1969 status = irdma_hmc_init_pble(&rf->sc_dev, in irdma_rt_init_hw()
1970 rf->pble_rsrc); in irdma_rt_init_hw()
1972 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1978 status = irdma_setup_aeq(rf); in irdma_rt_init_hw()
1980 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_rt_init_hw()
1981 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1985 rf->rsrc_created = true; in irdma_rt_init_hw()
1988 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_init_hw()
2006 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", in irdma_rt_init_hw()
2015 * @rf: RDMA PCI function
2017  * Create admin queues, HMC objects and RF resource objects
2020 irdma_ctrl_init_hw(struct irdma_pci_f *rf) in irdma_ctrl_init_hw() argument
2022 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_ctrl_init_hw()
2026 status = irdma_setup_init_state(rf); in irdma_ctrl_init_hw()
2029 rf->init_state = INITIAL_STATE; in irdma_ctrl_init_hw()
2031 status = irdma_create_cqp(rf); in irdma_ctrl_init_hw()
2034 rf->init_state = CQP_CREATED; in irdma_ctrl_init_hw()
2037 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_ctrl_init_hw()
2043 status = irdma_hmc_setup(rf); in irdma_ctrl_init_hw()
2046 rf->init_state = HMC_OBJS_CREATED; in irdma_ctrl_init_hw()
2048 status = irdma_initialize_hw_rsrc(rf); in irdma_ctrl_init_hw()
2051 rf->init_state = HW_RSRC_INITIALIZED; in irdma_ctrl_init_hw()
2053 status = irdma_create_ccq(rf); in irdma_ctrl_init_hw()
2056 rf->init_state = CCQ_CREATED; in irdma_ctrl_init_hw()
2058 status = irdma_setup_ceq_0(rf); in irdma_ctrl_init_hw()
2061 rf->init_state = CEQ0_CREATED; in irdma_ctrl_init_hw()
2063 rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq", in irdma_ctrl_init_hw()
2065 if (!rf->cqp_cmpl_wq) { in irdma_ctrl_init_hw()
2069 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); in irdma_ctrl_init_hw()
2075 rf->init_state, status); in irdma_ctrl_init_hw()
2076 irdma_ctrl_deinit_hw(rf); in irdma_ctrl_init_hw()
2082 * @rf: RDMA PCI function
2085 irdma_set_hw_rsrc(struct irdma_pci_f *rf) in irdma_set_hw_rsrc() argument
2087 rf->allocated_qps = (void *)(rf->mem_rsrc + in irdma_set_hw_rsrc()
2088 (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); in irdma_set_hw_rsrc()
2089 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; in irdma_set_hw_rsrc()
2090 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; in irdma_set_hw_rsrc()
2091 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; in irdma_set_hw_rsrc()
2092 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; in irdma_set_hw_rsrc()
2093 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; in irdma_set_hw_rsrc()
2094 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; in irdma_set_hw_rsrc()
2096 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
2097 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); in irdma_set_hw_rsrc()
2098 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
2100 spin_lock_init(&rf->rsrc_lock); in irdma_set_hw_rsrc()
2101 spin_lock_init(&rf->arp_lock); in irdma_set_hw_rsrc()
2102 spin_lock_init(&rf->qptable_lock); in irdma_set_hw_rsrc()
2103 spin_lock_init(&rf->cqtable_lock); in irdma_set_hw_rsrc()
2104 spin_lock_init(&rf->qh_list_lock); in irdma_set_hw_rsrc()
2109 * @rf: RDMA PCI function
2111 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf){ in irdma_calc_mem_rsrc_size() argument
2114 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; in irdma_calc_mem_rsrc_size()
2115 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); in irdma_calc_mem_rsrc_size()
2116 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); in irdma_calc_mem_rsrc_size()
2117 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); in irdma_calc_mem_rsrc_size()
2118 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); in irdma_calc_mem_rsrc_size()
2119 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); in irdma_calc_mem_rsrc_size()
2120 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); in irdma_calc_mem_rsrc_size()
2121 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); in irdma_calc_mem_rsrc_size()
2122 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; in irdma_calc_mem_rsrc_size()
2123 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq; in irdma_calc_mem_rsrc_size()
2130 * @rf: RDMA PCI function
2133 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) in irdma_initialize_hw_rsrc() argument
2139 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_initialize_hw_rsrc()
2140 rf->allocated_ws_nodes = in irdma_initialize_hw_rsrc()
2143 if (!rf->allocated_ws_nodes) in irdma_initialize_hw_rsrc()
2146 set_bit(0, rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2147 rf->max_ws_node_id = IRDMA_MAX_WS_NODES; in irdma_initialize_hw_rsrc()
2149 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; in irdma_initialize_hw_rsrc()
2150 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; in irdma_initialize_hw_rsrc()
2151 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; in irdma_initialize_hw_rsrc()
2152 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; in irdma_initialize_hw_rsrc()
2153 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; in irdma_initialize_hw_rsrc()
2154 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; in irdma_initialize_hw_rsrc()
2155 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; in irdma_initialize_hw_rsrc()
2156 rf->max_mcg = rf->max_qp; in irdma_initialize_hw_rsrc()
2158 rsrc_size = irdma_calc_mem_rsrc_size(rf); in irdma_initialize_hw_rsrc()
2159 rf->mem_rsrc = vzalloc(rsrc_size); in irdma_initialize_hw_rsrc()
2160 if (!rf->mem_rsrc) { in irdma_initialize_hw_rsrc()
2165 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; in irdma_initialize_hw_rsrc()
2167 irdma_set_hw_rsrc(rf); in irdma_initialize_hw_rsrc()
2169 set_bit(0, rf->allocated_mrs); in irdma_initialize_hw_rsrc()
2170 set_bit(0, rf->allocated_qps); in irdma_initialize_hw_rsrc()
2171 set_bit(0, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2172 set_bit(0, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2173 set_bit(0, rf->allocated_arps); in irdma_initialize_hw_rsrc()
2174 set_bit(0, rf->allocated_ahs); in irdma_initialize_hw_rsrc()
2175 set_bit(0, rf->allocated_mcgs); in irdma_initialize_hw_rsrc()
2176 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ in irdma_initialize_hw_rsrc()
2177 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ in irdma_initialize_hw_rsrc()
2178 set_bit(IRDMA_REM_ENDPOINT_TRK_QPID, rf->allocated_qps); /* qp 3 Remote Endpt trk */ in irdma_initialize_hw_rsrc()
2179 set_bit(1, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2180 set_bit(1, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2181 set_bit(2, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2182 set_bit(2, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2184 INIT_LIST_HEAD(&rf->mc_qht_list.list); in irdma_initialize_hw_rsrc()
2186 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); in irdma_initialize_hw_rsrc()
2187 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); in irdma_initialize_hw_rsrc()
2192 kfree(rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2193 rf->allocated_ws_nodes = NULL; in irdma_initialize_hw_rsrc()
2200 * @rf: RDMA PCI function
2204 irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) in irdma_cqp_ce_handler() argument
2207 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_cqp_ce_handler()
2215 spin_lock_irqsave(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2217 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2227 irdma_dev_err(&rf->iwdev->ibdev, in irdma_cqp_ce_handler()
2238 irdma_complete_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ce_handler()
2257 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, in cqp_compl_worker() local
2259 struct irdma_sc_cq *cq = &rf->ccq.sc_cq; in cqp_compl_worker()
2261 irdma_cqp_ce_handler(rf, cq); in cqp_compl_worker()
2321 * @rf: RDMA PCI function
2325 irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) in irdma_del_local_mac_entry() argument
2327 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_del_local_mac_entry()
2343 irdma_handle_cqp_op(rf, cqp_request); in irdma_del_local_mac_entry()
2350 * @rf: RDMA PCI function
2355 irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) in irdma_add_local_mac_entry() argument
2358 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_add_local_mac_entry()
2377 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_add_local_mac_entry()
2385 * @rf: RDMA PCI function
2393 irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) in irdma_alloc_local_mac_entry() argument
2395 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_alloc_local_mac_entry()
2409 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_alloc_local_mac_entry()
2433 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); in irdma_cqp_manage_apbvt_cmd()
2444 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_cqp_manage_apbvt_cmd()
2446 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, in irdma_cqp_manage_apbvt_cmd()
2450 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2451 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2524 * @rf: RDMA PCI function
2530 irdma_manage_arp_cache(struct irdma_pci_f *rf, const unsigned char *mac_addr, in irdma_manage_arp_cache() argument
2538 arp_index = irdma_arp_table(rf, ip_addr, mac_addr, action); in irdma_manage_arp_cache()
2542 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_manage_arp_cache()
2556 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2561 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2566 irdma_handle_cqp_op(rf, cqp_request); in irdma_manage_arp_cache()
2567 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_manage_arp_cache()
2599 struct irdma_cqp *iwcqp = &iwdev->rf->cqp; in irdma_manage_qhash()
2651 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, in irdma_manage_qhash()
2661 irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, in irdma_manage_qhash()
2672 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_manage_qhash()
2676 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_manage_qhash()
2687 * @rf: RDMA PCI function
2693 irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_hw_flush_wqes() argument
2702 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_hw_flush_wqes()
2713 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_flush_wqes()
2717 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2739 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_VERBS, in irdma_hw_flush_wqes()
2741 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, in irdma_hw_flush_wqes()
2746 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2753 * @rf: RDMA PCI function
2759 irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_gen_ae() argument
2766 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_gen_ae()
2778 irdma_handle_cqp_op(rf, cqp_request); in irdma_gen_ae()
2779 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_gen_ae()
2786 struct irdma_pci_f *rf = iwqp->iwdev->rf; in irdma_flush_wqes() local
2822 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, in irdma_flush_wqes()