Lines Matching full:rf

72  * @rf: RDMA PCI function
75 static void irdma_puda_ce_handler(struct irdma_pci_f *rf, in irdma_puda_ce_handler() argument
78 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_puda_ce_handler()
102 * @rf: RDMA PCI function
105 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) in irdma_process_ceq() argument
107 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_ceq()
127 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); in irdma_process_ceq()
130 irdma_puda_ce_handler(rf, cq); in irdma_process_ceq()
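The matched lines only hint at the control flow of irdma_process_ceq(): it drains the CEQ and routes each completed CQ. A hedged sketch of that shape (the poll helper and the CQ-type tests are placeholders, not irdma definitions):

        while ((cq = poll_next_completed_cq(ceq))) {        /* placeholder */
                if (cq_is_cqp(cq))                          /* placeholder */
                        queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
                else if (cq_is_ilq_or_ieq(cq))              /* placeholder */
                        irdma_puda_ce_handler(rf, cq);
                /* other CQ types complete to their owners */
        }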
211 * @rf: RDMA PCI function
213 static void irdma_process_aeq(struct irdma_pci_f *rf) in irdma_process_aeq() argument
215 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_aeq()
216 struct irdma_aeq *aeq = &rf->aeq; in irdma_process_aeq()
225 struct irdma_device *iwdev = rf->iwdev; in irdma_process_aeq()
246 spin_lock_irqsave(&rf->qptable_lock, flags); in irdma_process_aeq()
247 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
249 spin_unlock_irqrestore(&rf->qptable_lock, in irdma_process_aeq()
261 spin_unlock_irqrestore(&rf->qptable_lock, flags); in irdma_process_aeq()
343 spin_lock_irqsave(&rf->cqtable_lock, flags); in irdma_process_aeq()
344 iwcq = rf->cq_table[info->qp_cq_id]; in irdma_process_aeq()
346 spin_unlock_irqrestore(&rf->cqtable_lock, in irdma_process_aeq()
353 spin_unlock_irqrestore(&rf->cqtable_lock, flags); in irdma_process_aeq()
448 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); in irdma_dpc() local
450 if (rf->msix_shared) in irdma_dpc()
451 irdma_process_ceq(rf, rf->ceqlist); in irdma_dpc()
452 irdma_process_aeq(rf); in irdma_dpc()
453 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); in irdma_dpc()
463 struct irdma_pci_f *rf = iwceq->rf; in irdma_ceq_dpc() local
465 irdma_process_ceq(rf, iwceq); in irdma_ceq_dpc()
466 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); in irdma_ceq_dpc()
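irdma_dpc() and irdma_ceq_dpc() implement the classic top-half/bottom-half split: the hardirq handler only schedules a tasklet, and the tasklet drains completions before re-arming the vector. A minimal self-contained sketch of that split, with demo_* stand-ins rather than irdma symbols:

#include <linux/interrupt.h>

struct demo_ctx {
        struct tasklet_struct dpc_tasklet;
        /* ... */
};

static irqreturn_t demo_irq_handler(int irq, void *data)
{
        struct demo_ctx *ctx = data;

        tasklet_schedule(&ctx->dpc_tasklet);    /* defer out of hardirq */
        return IRQ_HANDLED;
}

static void demo_dpc(struct tasklet_struct *t)
{
        struct demo_ctx *ctx = from_tasklet(ctx, t, dpc_tasklet);

        drain_completions(ctx); /* placeholder for irdma_process_ceq/aeq() */
        enable_vector(ctx);     /* placeholder for irdma_ena_intr() */
}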
471 * @rf: RDMA PCI function
476 static int irdma_save_msix_info(struct irdma_pci_f *rf) in irdma_save_msix_info() argument
485 if (!rf->msix_count) in irdma_save_msix_info()
488 size = sizeof(struct irdma_msix_vector) * rf->msix_count; in irdma_save_msix_info()
489 size += struct_size(iw_qvlist, qv_info, rf->msix_count); in irdma_save_msix_info()
490 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); in irdma_save_msix_info()
491 if (!rf->iw_msixtbl) in irdma_save_msix_info()
494 rf->iw_qvlist = (struct irdma_qvlist_info *) in irdma_save_msix_info()
495 (&rf->iw_msixtbl[rf->msix_count]); in irdma_save_msix_info()
496 iw_qvlist = rf->iw_qvlist; in irdma_save_msix_info()
498 iw_qvlist->num_vectors = rf->msix_count; in irdma_save_msix_info()
499 if (rf->msix_count <= num_online_cpus()) in irdma_save_msix_info()
500 rf->msix_shared = true; in irdma_save_msix_info()
502 pmsix = rf->msix_entries; in irdma_save_msix_info()
503 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { in irdma_save_msix_info()
504 rf->iw_msixtbl[i].idx = pmsix->entry; in irdma_save_msix_info()
505 rf->iw_msixtbl[i].irq = pmsix->vector; in irdma_save_msix_info()
506 rf->iw_msixtbl[i].cpu_affinity = ceq_idx; in irdma_save_msix_info()
509 if (rf->msix_shared) in irdma_save_msix_info()
518 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; in irdma_save_msix_info()
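irdma_save_msix_info() sizes one allocation to hold the MSI-X table immediately followed by the qvlist, whose qv_info[] is a flexible array, then points rf->iw_qvlist at the tail. A portable, runnable illustration of that co-allocation (the struct definitions are stand-ins, not the irdma ones):

#include <stdio.h>
#include <stdlib.h>

struct vec { int idx, irq, cpu_affinity; };             /* stand-in */
struct qv_info { int v_idx; };                          /* stand-in */
struct qvlist { unsigned int num_vectors; struct qv_info qv_info[]; };

int main(void)
{
        unsigned int n = 4;                     /* msix_count stand-in */
        size_t size = sizeof(struct vec) * n +
                      sizeof(struct qvlist) + n * sizeof(struct qv_info);
        struct vec *tbl = calloc(1, size);
        struct qvlist *ql;

        if (!tbl)
                return 1;
        ql = (struct qvlist *)&tbl[n];          /* tail of the same buffer */
        ql->num_vectors = n;
        printf("one zeroed buffer of %zu bytes, qvlist at offset %zu\n",
               size, sizeof(struct vec) * n);
        free(tbl);
        return 0;
}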
532 struct irdma_pci_f *rf = data; in irdma_irq_handler() local
534 tasklet_schedule(&rf->dpc_tasklet); in irdma_irq_handler()
549 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n", in irdma_ceq_handler()
558 * @rf: RDMA PCI function
564 static void irdma_destroy_irq(struct irdma_pci_f *rf, in irdma_destroy_irq() argument
567 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_irq()
572 if (rf == dev_id) { in irdma_destroy_irq()
573 tasklet_kill(&rf->dpc_tasklet); in irdma_destroy_irq()
583 * @rf: RDMA PCI function
588 static void irdma_destroy_cqp(struct irdma_pci_f *rf) in irdma_destroy_cqp() argument
590 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_cqp()
591 struct irdma_cqp *cqp = &rf->cqp; in irdma_destroy_cqp()
598 irdma_cleanup_pending_cqp_op(rf); in irdma_destroy_cqp()
608 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) in irdma_destroy_virt_aeq() argument
610 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_virt_aeq()
614 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); in irdma_destroy_virt_aeq()
615 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_destroy_virt_aeq()
621 * @rf: RDMA PCI function
627 static void irdma_destroy_aeq(struct irdma_pci_f *rf) in irdma_destroy_aeq() argument
629 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_aeq()
630 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_aeq()
633 if (!rf->msix_shared) { in irdma_destroy_aeq()
634 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false); in irdma_destroy_aeq()
635 irdma_destroy_irq(rf, rf->iw_msixtbl, rf); in irdma_destroy_aeq()
637 if (rf->reset) in irdma_destroy_aeq()
647 irdma_destroy_virt_aeq(rf); in irdma_destroy_aeq()
657 * @rf: RDMA PCI function
663 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) in irdma_destroy_ceq() argument
665 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ceq()
668 if (rf->reset) in irdma_destroy_ceq()
689 * @rf: RDMA PCI function
693 static void irdma_del_ceq_0(struct irdma_pci_f *rf) in irdma_del_ceq_0() argument
695 struct irdma_ceq *iwceq = rf->ceqlist; in irdma_del_ceq_0()
698 if (rf->msix_shared) { in irdma_del_ceq_0()
699 msix_vec = &rf->iw_msixtbl[0]; in irdma_del_ceq_0()
700 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, in irdma_del_ceq_0()
703 irdma_destroy_irq(rf, msix_vec, rf); in irdma_del_ceq_0()
705 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceq_0()
706 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceq_0()
709 irdma_destroy_ceq(rf, iwceq); in irdma_del_ceq_0()
710 rf->sc_dev.ceq_valid = false; in irdma_del_ceq_0()
711 rf->ceqs_count = 0; in irdma_del_ceq_0()
716 * @rf: RDMA PCI function
721 static void irdma_del_ceqs(struct irdma_pci_f *rf) in irdma_del_ceqs() argument
723 struct irdma_ceq *iwceq = &rf->ceqlist[1]; in irdma_del_ceqs()
727 if (rf->msix_shared) in irdma_del_ceqs()
728 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceqs()
730 msix_vec = &rf->iw_msixtbl[2]; in irdma_del_ceqs()
732 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { in irdma_del_ceqs()
733 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id, in irdma_del_ceqs()
735 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceqs()
736 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_del_ceqs()
738 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, in irdma_del_ceqs()
742 rf->ceqs_count = 1; in irdma_del_ceqs()
747 * @rf: RDMA PCI function
752 static void irdma_destroy_ccq(struct irdma_pci_f *rf) in irdma_destroy_ccq() argument
754 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ccq()
755 struct irdma_ccq *ccq = &rf->ccq; in irdma_destroy_ccq()
758 if (rf->cqp_cmpl_wq) in irdma_destroy_ccq()
759 destroy_workqueue(rf->cqp_cmpl_wq); in irdma_destroy_ccq()
761 if (!rf->reset) in irdma_destroy_ccq()
830 * @rf: RDMA PCI function
837 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, in irdma_create_hmc_objs() argument
840 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_hmc_objs()
846 info.entry_type = rf->sd_type; in irdma_create_hmc_objs()
885 * @rf: RDMA PCI function
894 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf, in irdma_obj_aligned_mem() argument
901 va = (unsigned long)rf->obj_next.va; in irdma_obj_aligned_mem()
907 memptr->pa = rf->obj_next.pa + extra; in irdma_obj_aligned_mem()
909 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) in irdma_obj_aligned_mem()
912 rf->obj_next.va = (u8 *)memptr->va + size; in irdma_obj_aligned_mem()
913 rf->obj_next.pa = memptr->pa + size; in irdma_obj_aligned_mem()
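irdma_obj_aligned_mem() carves aligned sub-buffers out of the preallocated rf->obj_mem and advances the rf->obj_next cursor, failing if the carve would run past the end. The rounding itself is not among the matched lines; the sketch below assumes the usual power-of-two mask idiom:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uintptr_t next  = 0x10028;      /* obj_next.va stand-in */
        uintptr_t align = 0x100;        /* assumed power-of-two alignment */
        uintptr_t va    = (next + align - 1) & ~(align - 1);
        uintptr_t extra = va - next;    /* padding consumed by this carve */
        size_t size     = 0x40;

        printf("va=%#jx extra=%#jx new next=%#jx\n",
               (uintmax_t)va, (uintmax_t)extra, (uintmax_t)(va + size));
        return 0;
}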
920 * @rf: RDMA PCI function
925 static int irdma_create_cqp(struct irdma_pci_f *rf) in irdma_create_cqp() argument
929 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_cqp()
931 struct irdma_cqp *cqp = &rf->cqp; in irdma_create_cqp()
956 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), in irdma_create_cqp()
970 cqp_init_info.hmc_profile = rf->rsrc_profile; in irdma_create_cqp()
972 cqp_init_info.protocol_used = rf->protocol_used; in irdma_create_cqp()
974 switch (rf->rdma_ver) { in irdma_create_cqp()
1026 * @rf: RDMA PCI function
1031 static int irdma_create_ccq(struct irdma_pci_f *rf) in irdma_create_ccq() argument
1033 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ccq()
1035 struct irdma_ccq *ccq = &rf->ccq; in irdma_create_ccq()
1049 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, in irdma_create_ccq()
1065 info.vsi = &rf->default_vsi; in irdma_create_ccq()
1090 status = irdma_alloc_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1093 status = irdma_add_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1097 irdma_del_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1106 * @rf: RDMA PCI function
1114 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_cfg_ceq_vector() argument
1119 if (rf->msix_shared && !ceq_id) { in irdma_cfg_ceq_vector()
1121 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev)); in irdma_cfg_ceq_vector()
1122 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_ceq_vector()
1124 msix_vec->name, rf); in irdma_cfg_ceq_vector()
1128 dev_name(&rf->pcidev->dev), ceq_id); in irdma_cfg_ceq_vector()
1138 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); in irdma_cfg_ceq_vector()
1143 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true); in irdma_cfg_ceq_vector()
1150 * @rf: RDMA PCI function
1155 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf) in irdma_cfg_aeq_vector() argument
1157 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; in irdma_cfg_aeq_vector()
1160 if (!rf->msix_shared) { in irdma_cfg_aeq_vector()
1162 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev)); in irdma_cfg_aeq_vector()
1163 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_aeq_vector()
1165 msix_vec->name, rf); in irdma_cfg_aeq_vector()
1168 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); in irdma_cfg_aeq_vector()
1172 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true); in irdma_cfg_aeq_vector()
1179 * @rf: RDMA PCI function
1187 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_create_ceq() argument
1192 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ceq()
1196 iwceq->rf = rf; in irdma_create_ceq()
1197 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, in irdma_create_ceq()
1216 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_create_ceq()
1233 * @rf: RDMA PCI function
1239 static int irdma_setup_ceq_0(struct irdma_pci_f *rf) in irdma_setup_ceq_0() argument
1247 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceq_0()
1248 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); in irdma_setup_ceq_0()
1249 if (!rf->ceqlist) { in irdma_setup_ceq_0()
1254 iwceq = &rf->ceqlist[0]; in irdma_setup_ceq_0()
1255 status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi); in irdma_setup_ceq_0()
1257 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", in irdma_setup_ceq_0()
1263 i = rf->msix_shared ? 0 : 1; in irdma_setup_ceq_0()
1264 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceq_0()
1267 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); in irdma_setup_ceq_0()
1269 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceq_0()
1273 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceq_0()
1274 rf->ceqs_count++; in irdma_setup_ceq_0()
1277 if (status && !rf->ceqs_count) { in irdma_setup_ceq_0()
1278 kfree(rf->ceqlist); in irdma_setup_ceq_0()
1279 rf->ceqlist = NULL; in irdma_setup_ceq_0()
1282 rf->sc_dev.ceq_valid = true; in irdma_setup_ceq_0()
1289 * @rf: RDMA PCI function
1296 static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi) in irdma_setup_ceqs() argument
1305 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceqs()
1306 i = (rf->msix_shared) ? 1 : 2; in irdma_setup_ceqs()
1308 iwceq = &rf->ceqlist[ceq_id]; in irdma_setup_ceqs()
1309 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi); in irdma_setup_ceqs()
1311 ibdev_dbg(&rf->iwdev->ibdev, in irdma_setup_ceqs()
1316 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceqs()
1319 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); in irdma_setup_ceqs()
1321 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceqs()
1324 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceqs()
1325 rf->ceqs_count++; in irdma_setup_ceqs()
1331 irdma_del_ceqs(rf); in irdma_setup_ceqs()
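The index arithmetic in irdma_setup_ceq_0(), irdma_setup_ceqs(), irdma_del_ceq_0() and irdma_del_ceqs() all encodes one mapping: when the first vector is shared between the AEQ and CEQ 0, CEQ i uses MSI-X entry i; otherwise entry 0 is AEQ-only and CEQ i uses entry i + 1. As a one-line reading aid (not an irdma helper):

#include <stdbool.h>

static inline unsigned int ceq_to_msix_idx(unsigned int ceq_id, bool shared)
{
        return shared ? ceq_id : ceq_id + 1;
}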
1336 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) in irdma_create_virt_aeq() argument
1338 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_virt_aeq()
1343 if (rf->rdma_ver < IRDMA_GEN_2) in irdma_create_virt_aeq()
1353 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); in irdma_create_virt_aeq()
1360 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); in irdma_create_virt_aeq()
1362 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_create_virt_aeq()
1372 * @rf: RDMA PCI function
1377 static int irdma_create_aeq(struct irdma_pci_f *rf) in irdma_create_aeq() argument
1380 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_aeq()
1381 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_aeq()
1382 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; in irdma_create_aeq()
1384 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; in irdma_create_aeq()
1400 status = irdma_create_virt_aeq(rf, aeq_size); in irdma_create_aeq()
1414 info.msix_idx = rf->iw_msixtbl->idx; in irdma_create_aeq()
1427 irdma_destroy_virt_aeq(rf); in irdma_create_aeq()
1439 * @rf: RDMA PCI function
1444 static int irdma_setup_aeq(struct irdma_pci_f *rf) in irdma_setup_aeq() argument
1446 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_setup_aeq()
1449 status = irdma_create_aeq(rf); in irdma_setup_aeq()
1453 status = irdma_cfg_aeq_vector(rf); in irdma_setup_aeq()
1455 irdma_destroy_aeq(rf); in irdma_setup_aeq()
1459 if (!rf->msix_shared) in irdma_setup_aeq()
1460 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); in irdma_setup_aeq()
1482 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ilq()
1512 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ieq()
1530 struct irdma_pci_f *rf = iwdev->rf; in irdma_reinitialize_ieq() local
1534 iwdev->rf->reset = true; in irdma_reinitialize_ieq()
1535 rf->gen_ops.request_reset(rf); in irdma_reinitialize_ieq()
1541 * @rf: RDMA PCI function
1547 static int irdma_hmc_setup(struct irdma_pci_f *rf) in irdma_hmc_setup() argument
1552 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; in irdma_hmc_setup()
1554 rf->sd_type = IRDMA_SD_TYPE_DIRECT; in irdma_hmc_setup()
1555 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); in irdma_hmc_setup()
1559 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); in irdma_hmc_setup()
1566 * @rf: RDMA PCI function
1568 static void irdma_del_init_mem(struct irdma_pci_f *rf) in irdma_del_init_mem() argument
1570 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_del_init_mem()
1574 vfree(rf->mem_rsrc); in irdma_del_init_mem()
1575 rf->mem_rsrc = NULL; in irdma_del_init_mem()
1576 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_del_init_mem()
1577 rf->obj_mem.pa); in irdma_del_init_mem()
1578 rf->obj_mem.va = NULL; in irdma_del_init_mem()
1579 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_del_init_mem()
1580 bitmap_free(rf->allocated_ws_nodes); in irdma_del_init_mem()
1581 rf->allocated_ws_nodes = NULL; in irdma_del_init_mem()
1583 kfree(rf->ceqlist); in irdma_del_init_mem()
1584 rf->ceqlist = NULL; in irdma_del_init_mem()
1585 kfree(rf->iw_msixtbl); in irdma_del_init_mem()
1586 rf->iw_msixtbl = NULL; in irdma_del_init_mem()
1587 kfree(rf->hmc_info_mem); in irdma_del_init_mem()
1588 rf->hmc_info_mem = NULL; in irdma_del_init_mem()
1593 * @rf: RDMA PCI function
1599 static int irdma_initialize_dev(struct irdma_pci_f *rf) in irdma_initialize_dev() argument
1602 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_initialize_dev()
1611 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); in irdma_initialize_dev()
1612 if (!rf->hmc_info_mem) in irdma_initialize_dev()
1615 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; in irdma_initialize_dev()
1616 dev->hmc_info = &rf->hw.hmc; in irdma_initialize_dev()
1618 (rf->pble_rsrc + 1); in irdma_initialize_dev()
1620 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, in irdma_initialize_dev()
1628 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, in irdma_initialize_dev()
1636 info.bar0 = rf->hw.hw_addr; in irdma_initialize_dev()
1637 info.hmc_fn_id = rf->pf_id; in irdma_initialize_dev()
1638 info.hw = &rf->hw; in irdma_initialize_dev()
1639 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); in irdma_initialize_dev()
1645 kfree(rf->hmc_info_mem); in irdma_initialize_dev()
1646 rf->hmc_info_mem = NULL; in irdma_initialize_dev()
1664 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_deinit_hw()
1665 irdma_del_local_mac_entry(iwdev->rf, in irdma_rt_deinit_hw()
1674 iwdev->rf->reset); in irdma_rt_deinit_hw()
1680 iwdev->rf->reset); in irdma_rt_deinit_hw()
1696 static int irdma_setup_init_state(struct irdma_pci_f *rf) in irdma_setup_init_state() argument
1700 status = irdma_save_msix_info(rf); in irdma_setup_init_state()
1704 rf->hw.device = &rf->pcidev->dev; in irdma_setup_init_state()
1705 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); in irdma_setup_init_state()
1706 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, in irdma_setup_init_state()
1707 &rf->obj_mem.pa, GFP_KERNEL); in irdma_setup_init_state()
1708 if (!rf->obj_mem.va) { in irdma_setup_init_state()
1713 rf->obj_next = rf->obj_mem; in irdma_setup_init_state()
1714 status = irdma_initialize_dev(rf); in irdma_setup_init_state()
1721 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_setup_init_state()
1722 rf->obj_mem.pa); in irdma_setup_init_state()
1723 rf->obj_mem.va = NULL; in irdma_setup_init_state()
1725 kfree(rf->iw_msixtbl); in irdma_setup_init_state()
1726 rf->iw_msixtbl = NULL; in irdma_setup_init_state()
1738 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, in irdma_get_used_rsrc()
1739 iwdev->rf->max_pd); in irdma_get_used_rsrc()
1740 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, in irdma_get_used_rsrc()
1741 iwdev->rf->max_qp); in irdma_get_used_rsrc()
1742 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, in irdma_get_used_rsrc()
1743 iwdev->rf->max_cq); in irdma_get_used_rsrc()
1744 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, in irdma_get_used_rsrc()
1745 iwdev->rf->max_mr); in irdma_get_used_rsrc()
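irdma_get_used_rsrc() reads usage off each allocation bitmap as the index of the first clear bit: IDs are handed out from bit 0 upward, so that index equals the count of consecutively used IDs (and is only an estimate once frees leave holes). A tiny demo of the idea, using glibc's ffsl():

#include <stdio.h>
#include <strings.h>    /* ffsl() (glibc/BSD) */

int main(void)
{
        unsigned long allocated = 0x0f;         /* IDs 0..3 in use */
        int used = ffsl(~allocated) - 1;        /* first zero bit == 4 */

        printf("used = %d\n", used);
        return 0;
}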
1748 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) in irdma_ctrl_deinit_hw() argument
1750 enum init_completion_state state = rf->init_state; in irdma_ctrl_deinit_hw()
1752 rf->init_state = INVALID_STATE; in irdma_ctrl_deinit_hw()
1753 if (rf->rsrc_created) { in irdma_ctrl_deinit_hw()
1754 irdma_destroy_aeq(rf); in irdma_ctrl_deinit_hw()
1755 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_ctrl_deinit_hw()
1756 irdma_del_ceqs(rf); in irdma_ctrl_deinit_hw()
1757 rf->rsrc_created = false; in irdma_ctrl_deinit_hw()
1761 irdma_del_ceq_0(rf); in irdma_ctrl_deinit_hw()
1764 irdma_destroy_ccq(rf); in irdma_ctrl_deinit_hw()
1768 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, in irdma_ctrl_deinit_hw()
1769 rf->reset, rf->rdma_ver); in irdma_ctrl_deinit_hw()
1772 irdma_destroy_cqp(rf); in irdma_ctrl_deinit_hw()
1775 irdma_del_init_mem(rf); in irdma_ctrl_deinit_hw()
1779 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); in irdma_ctrl_deinit_hw()
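irdma_ctrl_deinit_hw() mirrors irdma_ctrl_init_hw() below: it switches on how far init got and relies on case fallthrough so each completed stage is undone in reverse order. The shape, abbreviated to a few of the states visible in this file, with placeholder undo_*() names:

        switch (state) {
        case CEQ0_CREATED:
                undo_ceq0();            /* placeholder */
                fallthrough;
        case CCQ_CREATED:
                undo_ccq();             /* placeholder */
                fallthrough;
        case HMC_OBJS_CREATED:
                undo_hmc_objs();        /* placeholder */
                fallthrough;
        case CQP_CREATED:
                undo_cqp();             /* placeholder */
                fallthrough;
        case INITIAL_STATE:
                undo_init_mem();        /* placeholder */
                break;
        default:
                WARN_ONCE(1, "bad init_state = %d\n", state);
                break;
        }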
1795 struct irdma_pci_f *rf = iwdev->rf; in irdma_rt_init_hw() local
1796 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_rt_init_hw()
1805 vsi_info.register_qset = rf->gen_ops.register_qset; in irdma_rt_init_hw()
1806 vsi_info.unregister_qset = rf->gen_ops.unregister_qset; in irdma_rt_init_hw()
1810 status = irdma_setup_cm_core(iwdev, rf->rdma_ver); in irdma_rt_init_hw()
1838 if (!rf->rsrc_created) { in irdma_rt_init_hw()
1839 status = irdma_setup_ceqs(rf, &iwdev->vsi); in irdma_rt_init_hw()
1845 status = irdma_hmc_init_pble(&rf->sc_dev, in irdma_rt_init_hw()
1846 rf->pble_rsrc); in irdma_rt_init_hw()
1848 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1854 status = irdma_setup_aeq(rf); in irdma_rt_init_hw()
1856 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_rt_init_hw()
1857 irdma_del_ceqs(rf); in irdma_rt_init_hw()
1861 rf->rsrc_created = true; in irdma_rt_init_hw()
1864 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_init_hw()
1882 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", in irdma_rt_init_hw()
1891 * @rf: RDMA PCI function
1893  * Create admin queues, HMC objects and RF resource objects
1895 int irdma_ctrl_init_hw(struct irdma_pci_f *rf) in irdma_ctrl_init_hw() argument
1897 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_ctrl_init_hw()
1900 status = irdma_setup_init_state(rf); in irdma_ctrl_init_hw()
1903 rf->init_state = INITIAL_STATE; in irdma_ctrl_init_hw()
1905 status = irdma_create_cqp(rf); in irdma_ctrl_init_hw()
1908 rf->init_state = CQP_CREATED; in irdma_ctrl_init_hw()
1910 status = irdma_hmc_setup(rf); in irdma_ctrl_init_hw()
1913 rf->init_state = HMC_OBJS_CREATED; in irdma_ctrl_init_hw()
1915 status = irdma_initialize_hw_rsrc(rf); in irdma_ctrl_init_hw()
1918 rf->init_state = HW_RSRC_INITIALIZED; in irdma_ctrl_init_hw()
1920 status = irdma_create_ccq(rf); in irdma_ctrl_init_hw()
1923 rf->init_state = CCQ_CREATED; in irdma_ctrl_init_hw()
1926 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_ctrl_init_hw()
1932 status = irdma_setup_ceq_0(rf); in irdma_ctrl_init_hw()
1935 rf->init_state = CEQ0_CREATED; in irdma_ctrl_init_hw()
1937 rf->cqp_cmpl_wq = in irdma_ctrl_init_hw()
1939 if (!rf->cqp_cmpl_wq) { in irdma_ctrl_init_hw()
1943 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); in irdma_ctrl_init_hw()
1948 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n", in irdma_ctrl_init_hw()
1949 rf->init_state, status); in irdma_ctrl_init_hw()
1950 irdma_ctrl_deinit_hw(rf); in irdma_ctrl_init_hw()
1956 * @rf: RDMA PCI function
1958 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) in irdma_set_hw_rsrc() argument
1960 rf->allocated_qps = (void *)(rf->mem_rsrc + in irdma_set_hw_rsrc()
1961 (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); in irdma_set_hw_rsrc()
1962 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; in irdma_set_hw_rsrc()
1963 rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; in irdma_set_hw_rsrc()
1964 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; in irdma_set_hw_rsrc()
1965 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; in irdma_set_hw_rsrc()
1966 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; in irdma_set_hw_rsrc()
1967 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; in irdma_set_hw_rsrc()
1968 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
1969 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); in irdma_set_hw_rsrc()
1970 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
1972 spin_lock_init(&rf->rsrc_lock); in irdma_set_hw_rsrc()
1973 spin_lock_init(&rf->arp_lock); in irdma_set_hw_rsrc()
1974 spin_lock_init(&rf->qptable_lock); in irdma_set_hw_rsrc()
1975 spin_lock_init(&rf->cqtable_lock); in irdma_set_hw_rsrc()
1976 spin_lock_init(&rf->qh_list_lock); in irdma_set_hw_rsrc()
1981 * @rf: RDMA PCI function
1983 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) in irdma_calc_mem_rsrc_size() argument
1987 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; in irdma_calc_mem_rsrc_size()
1988 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); in irdma_calc_mem_rsrc_size()
1989 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); in irdma_calc_mem_rsrc_size()
1990 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); in irdma_calc_mem_rsrc_size()
1991 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); in irdma_calc_mem_rsrc_size()
1992 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); in irdma_calc_mem_rsrc_size()
1993 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); in irdma_calc_mem_rsrc_size()
1994 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); in irdma_calc_mem_rsrc_size()
1995 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; in irdma_calc_mem_rsrc_size()
1996 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq; in irdma_calc_mem_rsrc_size()
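irdma_calc_mem_rsrc_size() and irdma_set_hw_rsrc() above must agree field for field: one sizes a single vzalloc'd buffer, the other carves it into the ARP table, seven ID bitmaps, and the QP/CQ pointer tables. A portable sketch of that pairing, with stand-in counts:

#include <stdio.h>

#define BITS_TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

struct arp_entry { unsigned char mac[6]; };     /* stand-in */

int main(void)
{
        unsigned long max_qp = 1024, max_cq = 512, arp_sz = 256;
        size_t off, qps_off, cqs_off;

        off = sizeof(struct arp_entry) * arp_sz;        /* arp_table     */
        qps_off = off;
        off += sizeof(long) * BITS_TO_LONGS(max_qp);    /* allocated_qps */
        cqs_off = off;
        off += sizeof(long) * BITS_TO_LONGS(max_cq);    /* allocated_cqs */
        /* ... mr/pd/ah/mcg/arp bitmaps, then the two pointer tables:    */
        off += sizeof(void *) * max_qp;                 /* qp_table      */
        off += sizeof(void *) * max_cq;                 /* cq_table      */

        printf("qps@%zu cqs@%zu total>=%zu\n", qps_off, cqs_off, off);
        return 0;
}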
2003 * @rf: RDMA PCI function
2005 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) in irdma_initialize_hw_rsrc() argument
2011 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_initialize_hw_rsrc()
2012 rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES, in irdma_initialize_hw_rsrc()
2014 if (!rf->allocated_ws_nodes) in irdma_initialize_hw_rsrc()
2017 set_bit(0, rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2018 rf->max_ws_node_id = IRDMA_MAX_WS_NODES; in irdma_initialize_hw_rsrc()
2020 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; in irdma_initialize_hw_rsrc()
2021 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; in irdma_initialize_hw_rsrc()
2022 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; in irdma_initialize_hw_rsrc()
2023 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; in irdma_initialize_hw_rsrc()
2024 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; in irdma_initialize_hw_rsrc()
2025 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; in irdma_initialize_hw_rsrc()
2026 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; in irdma_initialize_hw_rsrc()
2027 rf->max_mcg = rf->max_qp; in irdma_initialize_hw_rsrc()
2029 rsrc_size = irdma_calc_mem_rsrc_size(rf); in irdma_initialize_hw_rsrc()
2030 rf->mem_rsrc = vzalloc(rsrc_size); in irdma_initialize_hw_rsrc()
2031 if (!rf->mem_rsrc) { in irdma_initialize_hw_rsrc()
2036 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; in irdma_initialize_hw_rsrc()
2038 irdma_set_hw_rsrc(rf); in irdma_initialize_hw_rsrc()
2040 set_bit(0, rf->allocated_mrs); in irdma_initialize_hw_rsrc()
2041 set_bit(0, rf->allocated_qps); in irdma_initialize_hw_rsrc()
2042 set_bit(0, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2043 set_bit(0, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2044 set_bit(0, rf->allocated_arps); in irdma_initialize_hw_rsrc()
2045 set_bit(0, rf->allocated_ahs); in irdma_initialize_hw_rsrc()
2046 set_bit(0, rf->allocated_mcgs); in irdma_initialize_hw_rsrc()
2047 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ in irdma_initialize_hw_rsrc()
2048 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ in irdma_initialize_hw_rsrc()
2049 set_bit(1, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2050 set_bit(1, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2051 set_bit(2, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2052 set_bit(2, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2054 INIT_LIST_HEAD(&rf->mc_qht_list.list); in irdma_initialize_hw_rsrc()
2056 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); in irdma_initialize_hw_rsrc()
2057 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); in irdma_initialize_hw_rsrc()
2062 bitmap_free(rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2063 rf->allocated_ws_nodes = NULL; in irdma_initialize_hw_rsrc()
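The STag mask math at the end of irdma_initialize_hw_rsrc() keeps the low 32 - mrdrvbits bits of a 32-bit STag and clears the top mrdrvbits bits. Worked through for max_mr = 65536, where get_count_order() yields 16:

#include <stdio.h>

int main(void)
{
        unsigned int order = 16;        /* get_count_order(65536) */
        unsigned int mrdrvbits = 24 - (order > 14 ? order : 14);        /* 8 */
        unsigned int mask = ~(((1u << mrdrvbits) - 1) << (32 - mrdrvbits));

        printf("mr_stagmask = 0x%08x\n", mask);         /* 0x00ffffff */
        return 0;
}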
2070 * @rf: RDMA PCI function
2073 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) in irdma_cqp_ce_handler() argument
2076 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_cqp_ce_handler()
2084 spin_lock_irqsave(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2086 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2095 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", in irdma_cqp_ce_handler()
2106 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ce_handler()
2110 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_cqp_ce_handler()
2129 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, in cqp_compl_worker() local
2131 struct irdma_sc_cq *cq = &rf->ccq.sc_cq; in cqp_compl_worker()
2133 irdma_cqp_ce_handler(rf, cq); in cqp_compl_worker()
2191 * @rf: RDMA PCI function
2194 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) in irdma_del_local_mac_entry() argument
2196 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_del_local_mac_entry()
2212 irdma_handle_cqp_op(rf, cqp_request); in irdma_del_local_mac_entry()
2219 * @rf: RDMA PCI function
2223 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) in irdma_add_local_mac_entry() argument
2226 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_add_local_mac_entry()
2245 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_add_local_mac_entry()
2253 * @rf: RDMA PCI function
2260 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) in irdma_alloc_local_mac_entry() argument
2262 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_alloc_local_mac_entry()
2276 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_alloc_local_mac_entry()
2299 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); in irdma_cqp_manage_apbvt_cmd()
2310 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_cqp_manage_apbvt_cmd()
2315 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2316 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_cqp_manage_apbvt_cmd()
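Nearly every control-path helper from here down repeats one lifecycle: allocate a refcounted CQP request, fill in the op-specific cqp_info, post it through irdma_handle_cqp_op(), and drop the reference regardless of outcome. A skeleton of the pattern (field names beyond those visible in the fragments are paraphrased in the comment):

        struct irdma_cqp_request *cqp_request;
        int status;

        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true /* wait */);
        if (!cqp_request)
                return -ENOMEM;

        /* describe the op: set the opcode and the in.u.<op> arguments */

        status = irdma_handle_cqp_op(rf, cqp_request);
        irdma_put_cqp_request(&rf->cqp, cqp_request);
        return status;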
2387 * @rf: RDMA PCI function
2393 void irdma_manage_arp_cache(struct irdma_pci_f *rf, in irdma_manage_arp_cache() argument
2402 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); in irdma_manage_arp_cache()
2406 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_manage_arp_cache()
2420 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2425 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2430 irdma_handle_cqp_op(rf, cqp_request); in irdma_manage_arp_cache()
2431 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_manage_arp_cache()
2461 struct irdma_cqp *iwcqp = &iwdev->rf->cqp; in irdma_manage_qhash()
2526 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_manage_qhash()
2530 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_manage_qhash()
2578 * @rf: RDMA PCI function
2583 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_hw_flush_wqes() argument
2592 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_hw_flush_wqes()
2605 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_flush_wqes()
2609 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2640 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_hw_flush_wqes()
2653 status = irdma_handle_cqp_op(rf, new_req); in irdma_hw_flush_wqes()
2662 irdma_put_cqp_request(&rf->cqp, new_req); in irdma_hw_flush_wqes()
2673 ibdev_dbg(&rf->iwdev->ibdev, in irdma_hw_flush_wqes()
2675 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, in irdma_hw_flush_wqes()
2680 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2687 * @rf: RDMA PCI function
2692 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_gen_ae() argument
2699 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_gen_ae()
2711 irdma_handle_cqp_op(rf, cqp_request); in irdma_gen_ae()
2712 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_gen_ae()
2718 struct irdma_pci_f *rf = iwqp->iwdev->rf; in irdma_flush_wqes() local
2754 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, in irdma_flush_wqes()