Lines Matching +full:hw +full:-revision
(The hits below are from the HNS RoCE driver, drivers/infiniband/hw/hns/hns_roce_main.c; each line shows its file line number, the matched code, and the enclosing function.)

51 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in hns_roce_set_mac()
54 if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN)) in hns_roce_set_mac()
58 hr_dev->dev_addr[port][i] = addr[i]; in hns_roce_set_mac()
60 phy_port = hr_dev->iboe.phy_port[port]; in hns_roce_set_mac()
61 return hr_dev->hw->set_mac(hr_dev, phy_port, addr); in hns_roce_set_mac()
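The set_mac hits above show the driver skipping the hardware update when the cached per-port MAC already matches the new address. A minimal standalone sketch of that early-exit pattern (plain C, not driver code; the cache layout and port count are invented for the demo):

	/*
	 * Standalone sketch of the pattern above: skip the expensive
	 * hardware update when the cached MAC is unchanged.
	 */
	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN  6
	#define MAX_PORTS 2

	static unsigned char dev_addr[MAX_PORTS][ETH_ALEN];

	static int set_mac(unsigned int port, const unsigned char *addr)
	{
		/* Unchanged address: nothing to program, report success. */
		if (!memcmp(dev_addr[port], addr, ETH_ALEN))
			return 0;

		memcpy(dev_addr[port], addr, ETH_ALEN);
		printf("port %u: programming new MAC\n", port);
		return 0; /* a real driver would call its hw->set_mac() here */
	}

	int main(void)
	{
		unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };

		set_mac(0, mac); /* programs the MAC */
		set_mac(0, mac); /* silently skipped: address unchanged */
		return 0;
	}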
66 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); in hns_roce_add_gid()
67 u32 port = attr->port_num - 1; in hns_roce_add_gid()
70 if (port >= hr_dev->caps.num_ports) in hns_roce_add_gid()
71 return -EINVAL; in hns_roce_add_gid()
73 ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr); in hns_roce_add_gid()
80 struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); in hns_roce_del_gid()
81 u32 port = attr->port_num - 1; in hns_roce_del_gid()
84 if (port >= hr_dev->caps.num_ports) in hns_roce_del_gid()
85 return -EINVAL; in hns_roce_del_gid()
87 ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL); in hns_roce_del_gid()
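Both GID paths validate the zero-based port index against the port count before dispatching to the hardware hook, and deletion passes NULL to clear the table entry. A small sketch of that shared shape, with a function-pointer table standing in for hr_dev->hw (all names here are illustrative):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct gid { unsigned char raw[16]; };

	struct hw_ops {
		int (*set_gid)(unsigned int index, const struct gid *gid);
	};

	static int demo_set_gid(unsigned int index, const struct gid *gid)
	{
		printf("index %u: %s\n", index, gid ? "install" : "clear");
		return 0;
	}

	static const struct hw_ops ops = { .set_gid = demo_set_gid };
	static const unsigned int num_ports = 1;

	static int add_gid(unsigned int port, unsigned int idx,
			   const struct gid *g)
	{
		if (port >= num_ports)	/* zero-based port must be in range */
			return -EINVAL;
		return ops.set_gid(idx, g);
	}

	static int del_gid(unsigned int port, unsigned int idx)
	{
		if (port >= num_ports)
			return -EINVAL;
		return ops.set_gid(idx, NULL);	/* NULL clears the entry */
	}

	int main(void)
	{
		struct gid g = { { 0 } };

		add_gid(0, 0, &g);
		del_gid(0, 0);
		return 0;
	}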
99 net_dev = ib_device_get_netdev(&hr_dev->ib_dev, port_num); in hns_roce_get_port_state()
101 return -ENODEV; in hns_roce_get_port_state()
103 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) { in hns_roce_get_port_state()
106 *state = ib_get_curr_port_state(bond_grp->upper_dev); in hns_roce_get_port_state()
121 struct ib_device *ibdev = &hr_dev->ib_dev; in handle_en_event()
122 struct device *dev = hr_dev->dev; in handle_en_event()
129 return -ENODEV; in handle_en_event()
135 ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr); in handle_en_event()
139 ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr); in handle_en_event()
148 write_lock_irq(&ibdev->cache_lock); in handle_en_event()
149 if (ibdev->port_data[port].cache.last_port_state == curr_state) { in handle_en_event()
150 write_unlock_irq(&ibdev->cache_lock); in handle_en_event()
153 ibdev->port_data[port].cache.last_port_state = curr_state; in handle_en_event()
154 write_unlock_irq(&ibdev->cache_lock); in handle_en_event()
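The event handler compares the new port state against a cached last_port_state under the device's cache_lock and returns early when nothing changed, so repeated NETDEV events do not fan out as duplicate IB port events. A standalone sketch of that suppression pattern, with a pthread rwlock standing in for the kernel lock (build with -lpthread; all names are ours):

	#include <pthread.h>
	#include <stdio.h>

	enum port_state { PORT_DOWN, PORT_ACTIVE };

	static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
	static enum port_state last_port_state = PORT_DOWN;

	static void handle_event(enum port_state curr_state)
	{
		pthread_rwlock_wrlock(&cache_lock);
		if (last_port_state == curr_state) {
			pthread_rwlock_unlock(&cache_lock);
			return;		/* no transition: nothing to report */
		}
		last_port_state = curr_state;
		pthread_rwlock_unlock(&cache_lock);

		printf("dispatching port event: state=%d\n", curr_state);
	}

	int main(void)
	{
		handle_event(PORT_ACTIVE);	/* reported */
		handle_event(PORT_ACTIVE);	/* suppressed: same state */
		handle_event(PORT_DOWN);	/* reported */
		return 0;
	}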
182 iboe = &hr_dev->iboe; in hns_roce_netdev_event()
183 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) { in hns_roce_netdev_event()
186 upper = bond_grp ? bond_grp->upper_dev : NULL; in hns_roce_netdev_event()
189 for (port = 0; port < hr_dev->caps.num_ports; port++) { in hns_roce_netdev_event()
190 if ((!upper && dev == iboe->netdevs[port]) || in hns_roce_netdev_event()
208 for (i = 0; i < hr_dev->caps.num_ports; i++) { in hns_roce_setup_mtu_mac()
210 ret = hns_roce_set_mac(hr_dev, i, net_dev->dev_addr); in hns_roce_setup_mtu_mac()
226 props->fw_ver = hr_dev->caps.fw_ver; in hns_roce_query_device()
227 props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid); in hns_roce_query_device()
228 props->max_mr_size = (u64)(~(0ULL)); in hns_roce_query_device()
229 props->page_size_cap = hr_dev->caps.page_size_cap; in hns_roce_query_device()
230 props->vendor_id = hr_dev->vendor_id; in hns_roce_query_device()
231 props->vendor_part_id = hr_dev->vendor_part_id; in hns_roce_query_device()
232 props->hw_ver = hr_dev->hw_rev; in hns_roce_query_device()
233 props->max_qp = hr_dev->caps.num_qps; in hns_roce_query_device()
234 props->max_qp_wr = hr_dev->caps.max_wqes; in hns_roce_query_device()
235 props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | in hns_roce_query_device()
237 props->max_send_sge = hr_dev->caps.max_sq_sg; in hns_roce_query_device()
238 props->max_recv_sge = hr_dev->caps.max_rq_sg; in hns_roce_query_device()
239 props->max_sge_rd = hr_dev->caps.max_sq_sg; in hns_roce_query_device()
240 props->max_cq = hr_dev->caps.num_cqs; in hns_roce_query_device()
241 props->max_cqe = hr_dev->caps.max_cqes; in hns_roce_query_device()
242 props->max_mr = hr_dev->caps.num_mtpts; in hns_roce_query_device()
243 props->max_pd = hr_dev->caps.num_pds; in hns_roce_query_device()
244 props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma; in hns_roce_query_device()
245 props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma; in hns_roce_query_device()
246 props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ? in hns_roce_query_device()
248 props->max_pkeys = 1; in hns_roce_query_device()
249 props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay; in hns_roce_query_device()
250 props->max_ah = INT_MAX; in hns_roce_query_device()
251 props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD; in hns_roce_query_device()
252 props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT; in hns_roce_query_device()
253 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in hns_roce_query_device()
254 props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08; in hns_roce_query_device()
256 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { in hns_roce_query_device()
257 props->max_srq = hr_dev->caps.num_srqs; in hns_roce_query_device()
258 props->max_srq_wr = hr_dev->caps.max_srq_wrs; in hns_roce_query_device()
259 props->max_srq_sge = hr_dev->caps.max_srq_sges; in hns_roce_query_device()
262 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR && in hns_roce_query_device()
263 hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { in hns_roce_query_device()
264 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; in hns_roce_query_device()
265 props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA; in hns_roce_query_device()
268 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) in hns_roce_query_device()
269 props->device_cap_flags |= IB_DEVICE_XRC; in hns_roce_query_device()
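Everything hns_roce_query_device() fills into struct ib_device_attr is visible from userspace through libibverbs. A short reader for whichever RDMA device happens to be present (generic, not hns-specific; link with -libverbs):

	#include <stdio.h>
	#include <infiniband/verbs.h>

	int main(void)
	{
		struct ibv_device **list;
		struct ibv_context *ctx;
		struct ibv_device_attr attr;

		list = ibv_get_device_list(NULL);
		if (!list || !list[0]) {
			fprintf(stderr, "no RDMA device found\n");
			return 1;
		}

		ctx = ibv_open_device(list[0]);
		if (!ctx)
			return 1;

		if (!ibv_query_device(ctx, &attr)) {
			printf("fw_ver:      %s\n", attr.fw_ver);
			printf("max_qp:      %d\n", attr.max_qp);
			printf("max_cqe:     %d\n", attr.max_cqe);
			printf("max_mr_size: %llu\n",
			       (unsigned long long)attr.max_mr_size);
		}

		ibv_close_device(ctx);
		ibv_free_device_list(list);
		return 0;
	}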
283 port = port_num - 1; in hns_roce_query_port()
287 props->max_mtu = hr_dev->caps.max_mtu; in hns_roce_query_port()
288 props->gid_tbl_len = hr_dev->caps.gid_table_len[port]; in hns_roce_query_port()
289 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | in hns_roce_query_port()
292 props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN; in hns_roce_query_port()
293 props->pkey_tbl_len = 1; in hns_roce_query_port()
294 ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed, in hns_roce_query_port()
295 &props->active_width); in hns_roce_query_port()
302 return -EINVAL; in hns_roce_query_port()
305 mtu = iboe_get_mtu(net_dev->mtu); in hns_roce_query_port()
306 props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256; in hns_roce_query_port()
310 ret = hns_roce_get_port_state(hr_dev, port_num, &props->state); in hns_roce_query_port()
316 props->phys_state = props->state == IB_PORT_ACTIVE ? in hns_roce_query_port()
332 return -EINVAL; in hns_roce_query_pkey()
345 return -EOPNOTSUPP; in hns_roce_modify_device()
348 spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags); in hns_roce_modify_device()
349 memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE); in hns_roce_modify_device()
350 spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags); in hns_roce_modify_device()
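The modify path rejects any mask bits it does not implement with -EOPNOTSUPP, then copies the node description under sm_lock so concurrent readers never observe a torn string. A standalone sketch of that shape, with a pthread spinlock standing in for the kernel lock (the mask bit value is invented; build with -lpthread):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	#define NODE_DESC_SIZE   64
	#define MODIFY_NODE_DESC 0x1	/* the only mask bit accepted here */

	static pthread_spinlock_t sm_lock;
	static char node_desc[NODE_DESC_SIZE];

	static int modify_device(unsigned int mask, const char *desc)
	{
		if (mask & ~MODIFY_NODE_DESC)
			return -EOPNOTSUPP;	/* unknown bits: refuse */

		pthread_spin_lock(&sm_lock);
		memcpy(node_desc, desc, NODE_DESC_SIZE);
		pthread_spin_unlock(&sm_lock);
		return 0;
	}

	int main(void)
	{
		char desc[NODE_DESC_SIZE] = "hns-roce demo node";

		pthread_spin_init(&sm_lock, PTHREAD_PROCESS_PRIVATE);
		printf("ret = %d\n", modify_device(MODIFY_NODE_DESC, desc));
		printf("ret = %d\n", modify_device(0x8, desc)); /* rejected */
		return 0;
	}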
368 entry->address = address; in hns_roce_user_mmap_entry_insert()
369 entry->mmap_type = mmap_type; in hns_roce_user_mmap_entry_insert()
375 ucontext, &entry->rdma_entry, length, 0); in hns_roce_user_mmap_entry_insert()
379 ucontext, &entry->rdma_entry, length, 1, in hns_roce_user_mmap_entry_insert()
383 ret = -EINVAL; in hns_roce_user_mmap_entry_insert()
397 if (context->db_mmap_entry) in hns_roce_dealloc_uar_entry()
399 &context->db_mmap_entry->rdma_entry); in hns_roce_dealloc_uar_entry()
407 address = context->uar.pfn << PAGE_SHIFT; in hns_roce_alloc_uar_entry()
408 context->db_mmap_entry = hns_roce_user_mmap_entry_insert( in hns_roce_alloc_uar_entry()
410 if (!context->db_mmap_entry) in hns_roce_alloc_uar_entry()
411 return -ENOMEM; in hns_roce_alloc_uar_entry()
420 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); in hns_roce_alloc_ucontext()
423 int ret = -EAGAIN; in hns_roce_alloc_ucontext()
425 if (!hr_dev->active) in hns_roce_alloc_ucontext()
428 resp.qp_tab_size = hr_dev->caps.num_qps; in hns_roce_alloc_ucontext()
429 resp.srq_tab_size = hr_dev->caps.num_srqs; in hns_roce_alloc_ucontext()
432 min(udata->inlen, sizeof(ucmd))); in hns_roce_alloc_ucontext()
436 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in hns_roce_alloc_ucontext()
437 context->config = ucmd.config & HNS_ROCE_EXSGE_FLAGS; in hns_roce_alloc_ucontext()
439 if (context->config & HNS_ROCE_EXSGE_FLAGS) { in hns_roce_alloc_ucontext()
441 resp.max_inline_data = hr_dev->caps.max_sq_inline; in hns_roce_alloc_ucontext()
444 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) { in hns_roce_alloc_ucontext()
445 context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS; in hns_roce_alloc_ucontext()
446 if (context->config & HNS_ROCE_RQ_INLINE_FLAGS) in hns_roce_alloc_ucontext()
450 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) { in hns_roce_alloc_ucontext()
451 context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS; in hns_roce_alloc_ucontext()
452 if (context->config & HNS_ROCE_CQE_INLINE_FLAGS) in hns_roce_alloc_ucontext()
456 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in hns_roce_alloc_ucontext()
457 resp.congest_type = hr_dev->caps.cong_cap; in hns_roce_alloc_ucontext()
459 ret = hns_roce_uar_alloc(hr_dev, &context->uar); in hns_roce_alloc_ucontext()
467 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || in hns_roce_alloc_ucontext()
468 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { in hns_roce_alloc_ucontext()
469 INIT_LIST_HEAD(&context->page_list); in hns_roce_alloc_ucontext()
470 mutex_init(&context->page_mutex); in hns_roce_alloc_ucontext()
473 resp.cqe_size = hr_dev->caps.cqe_sz; in hns_roce_alloc_ucontext()
476 min(udata->outlen, sizeof(resp))); in hns_roce_alloc_ucontext()
485 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || in hns_roce_alloc_ucontext()
486 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) in hns_roce_alloc_ucontext()
487 mutex_destroy(&context->page_mutex); in hns_roce_alloc_ucontext()
491 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx); in hns_roce_alloc_ucontext()
494 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_UCTX_ALLOC_ERR_CNT]); in hns_roce_alloc_ucontext()
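The ucontext setup above negotiates features: the config bits requested by userspace in ucmd are masked against what the silicon revision and caps.flags actually support, and only the granted bits land in context->config (and are echoed back in the response). A sketch of that negotiation; the flag values and the new_hw gate are invented for illustration:

	#include <stdio.h>

	#define EXSGE_FLAG      0x1
	#define RQ_INLINE_FLAG  0x2
	#define CQE_INLINE_FLAG 0x4

	struct caps { unsigned int flags; int new_hw; };

	static unsigned int negotiate(const struct caps *caps,
				      unsigned int requested)
	{
		unsigned int config = 0;

		if (caps->new_hw)		/* newer silicon only */
			config |= requested & EXSGE_FLAG;
		if (caps->flags & RQ_INLINE_FLAG)
			config |= requested & RQ_INLINE_FLAG;
		if (caps->flags & CQE_INLINE_FLAG)
			config |= requested & CQE_INLINE_FLAG;

		return config;		/* reported back to userspace */
	}

	int main(void)
	{
		struct caps caps = { .flags = RQ_INLINE_FLAG, .new_hw = 1 };

		/* User asks for everything; only EXSGE + RQ inline granted. */
		printf("granted: 0x%x\n", negotiate(&caps, 0x7));
		return 0;
	}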
502 struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device); in hns_roce_dealloc_ucontext()
506 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || in hns_roce_dealloc_ucontext()
507 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) in hns_roce_dealloc_ucontext()
508 mutex_destroy(&context->page_mutex); in hns_roce_dealloc_ucontext()
512 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx); in hns_roce_dealloc_ucontext()
517 struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device); in hns_roce_mmap()
524 if (hr_dev->dis_db) { in hns_roce_mmap()
525 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]); in hns_roce_mmap()
526 return -EPERM; in hns_roce_mmap()
529 rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff); in hns_roce_mmap()
531 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]); in hns_roce_mmap()
532 return -EINVAL; in hns_roce_mmap()
536 pfn = entry->address >> PAGE_SHIFT; in hns_roce_mmap()
538 switch (entry->mmap_type) { in hns_roce_mmap()
541 prot = pgprot_device(vma->vm_page_prot); in hns_roce_mmap()
544 ret = -EINVAL; in hns_roce_mmap()
548 ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE, in hns_roce_mmap()
554 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MMAP_ERR_CNT]); in hns_roce_mmap()
576 immutable->pkey_tbl_len = attr.pkey_tbl_len; in hns_roce_port_immutable()
577 immutable->gid_tbl_len = attr.gid_tbl_len; in hns_roce_port_immutable()
579 immutable->max_mad_size = IB_MGMT_MAD_SIZE; in hns_roce_port_immutable()
580 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; in hns_roce_port_immutable()
581 if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2) in hns_roce_port_immutable()
582 immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; in hns_roce_port_immutable()
593 u64 fw_ver = to_hr_dev(device)->caps.fw_ver; in hns_roce_get_fw_ver()
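caps.fw_ver is a packed 64-bit value that the driver decomposes into major/minor/sub-minor fields for the firmware-version string. A standalone sketch of that style of decode; treat the exact field boundaries below as illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed layout: major in the high 32 bits, minor and sub-minor
	 * in the two low 16-bit halves. */
	static void format_fw_ver(uint64_t fw_ver, char *buf, size_t len)
	{
		unsigned int major = fw_ver >> 32;
		unsigned int minor = (fw_ver >> 16) & 0xffff;
		unsigned int sub_minor = fw_ver & 0xffff;

		snprintf(buf, len, "%u.%u.%04u", major, minor, sub_minor);
	}

	int main(void)
	{
		char str[32];

		format_fw_ver(0x0000000100020003ULL, str, sizeof(str));
		printf("fw: %s\n", str);	/* prints "fw: 1.2.0003" */
		return 0;
	}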
637 if (port_num > hr_dev->caps.num_ports) { in hns_roce_alloc_hw_port_stats()
658 if (port > hr_dev->caps.num_ports) in hns_roce_get_hw_stats()
659 return -EINVAL; in hns_roce_get_hw_stats()
661 ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port, in hns_roce_get_hw_stats()
664 ibdev_err(device, "failed to query hw counter, ret = %d\n", in hns_roce_get_hw_stats()
680 * is unregistered, re-initialize the remaining slaves before in hns_roce_unregister_bond_cleanup()
683 bond_grp->bond_state = HNS_ROCE_BOND_NOT_BONDED; in hns_roce_unregister_bond_cleanup()
685 net_dev = bond_grp->bond_func_info[i].net_dev; in hns_roce_unregister_bond_cleanup()
697 struct hns_roce_ib_iboe *iboe = &hr_dev->iboe; in hns_roce_unregister_device()
701 if (bond_cleanup && hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) { in hns_roce_unregister_device()
707 hr_dev->active = false; in hns_roce_unregister_device()
708 unregister_netdevice_notifier(&iboe->nb); in hns_roce_unregister_device()
709 ib_unregister_device(&hr_dev->ib_dev); in hns_roce_unregister_device()
795 struct device *dev = hr_dev->dev; in hns_roce_register_device()
801 iboe = &hr_dev->iboe; in hns_roce_register_device()
802 spin_lock_init(&iboe->lock); in hns_roce_register_device()
804 ib_dev = &hr_dev->ib_dev; in hns_roce_register_device()
806 ib_dev->node_type = RDMA_NODE_IB_CA; in hns_roce_register_device()
807 ib_dev->dev.parent = dev; in hns_roce_register_device()
809 ib_dev->phys_port_cnt = hr_dev->caps.num_ports; in hns_roce_register_device()
810 ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey; in hns_roce_register_device()
811 ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors; in hns_roce_register_device()
813 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) in hns_roce_register_device()
816 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) in hns_roce_register_device()
819 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { in hns_roce_register_device()
821 ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops); in hns_roce_register_device()
824 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) in hns_roce_register_device()
827 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09 && in hns_roce_register_device()
828 !hr_dev->is_vf) in hns_roce_register_device()
831 ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops); in hns_roce_register_device()
837 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) { in hns_roce_register_device()
846 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND && in hns_roce_register_device()
855 for (i = 0; i < hr_dev->caps.num_ports; i++) { in hns_roce_register_device()
877 iboe->nb.notifier_call = hns_roce_netdev_event; in hns_roce_register_device()
878 ret = register_netdevice_notifier(&iboe->nb); in hns_roce_register_device()
884 hr_dev->active = true; in hns_roce_register_device()
895 struct device *dev = hr_dev->dev; in hns_roce_init_hem()
898 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table, in hns_roce_init_hem()
899 HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz, in hns_roce_init_hem()
900 hr_dev->caps.num_mtpts); in hns_roce_init_hem()
906 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, in hns_roce_init_hem()
907 HEM_TYPE_QPC, hr_dev->caps.qpc_sz, in hns_roce_init_hem()
908 hr_dev->caps.num_qps); in hns_roce_init_hem()
914 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, in hns_roce_init_hem()
916 hr_dev->caps.irrl_entry_sz * in hns_roce_init_hem()
917 hr_dev->caps.max_qp_init_rdma, in hns_roce_init_hem()
918 hr_dev->caps.num_qps); in hns_roce_init_hem()
924 if (hr_dev->caps.trrl_entry_sz) { in hns_roce_init_hem()
926 &hr_dev->qp_table.trrl_table, in hns_roce_init_hem()
928 hr_dev->caps.trrl_entry_sz * in hns_roce_init_hem()
929 hr_dev->caps.max_qp_dest_rdma, in hns_roce_init_hem()
930 hr_dev->caps.num_qps); in hns_roce_init_hem()
938 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, in hns_roce_init_hem()
939 HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz, in hns_roce_init_hem()
940 hr_dev->caps.num_cqs); in hns_roce_init_hem()
946 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { in hns_roce_init_hem()
947 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table, in hns_roce_init_hem()
949 hr_dev->caps.srqc_entry_sz, in hns_roce_init_hem()
950 hr_dev->caps.num_srqs); in hns_roce_init_hem()
958 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { in hns_roce_init_hem()
960 &hr_dev->qp_table.sccc_table, in hns_roce_init_hem()
962 hr_dev->caps.sccc_sz, in hns_roce_init_hem()
963 hr_dev->caps.num_qps); in hns_roce_init_hem()
971 if (hr_dev->caps.qpc_timer_entry_sz) { in hns_roce_init_hem()
972 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table, in hns_roce_init_hem()
974 hr_dev->caps.qpc_timer_entry_sz, in hns_roce_init_hem()
975 hr_dev->caps.qpc_timer_bt_num); in hns_roce_init_hem()
983 if (hr_dev->caps.cqc_timer_entry_sz) { in hns_roce_init_hem()
984 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table, in hns_roce_init_hem()
986 hr_dev->caps.cqc_timer_entry_sz, in hns_roce_init_hem()
987 hr_dev->caps.cqc_timer_bt_num); in hns_roce_init_hem()
995 if (hr_dev->caps.gmv_entry_sz) { in hns_roce_init_hem()
996 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table, in hns_roce_init_hem()
998 hr_dev->caps.gmv_entry_sz, in hns_roce_init_hem()
999 hr_dev->caps.gmv_entry_num); in hns_roce_init_hem()
1011 if (hr_dev->caps.cqc_timer_entry_sz) in hns_roce_init_hem()
1012 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table); in hns_roce_init_hem()
1015 if (hr_dev->caps.qpc_timer_entry_sz) in hns_roce_init_hem()
1016 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table); in hns_roce_init_hem()
1019 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) in hns_roce_init_hem()
1021 &hr_dev->qp_table.sccc_table); in hns_roce_init_hem()
1023 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) in hns_roce_init_hem()
1024 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table); in hns_roce_init_hem()
1027 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_init_hem()
1030 if (hr_dev->caps.trrl_entry_sz) in hns_roce_init_hem()
1032 &hr_dev->qp_table.trrl_table); in hns_roce_init_hem()
1035 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_init_hem()
1038 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_init_hem()
1041 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table); in hns_roce_init_hem()
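hns_roce_init_hem() allocates a long chain of context tables and, on any failure, unwinds exactly the tables already set up, in reverse order, through cascading goto labels. A compact standalone sketch of that error-unwind idiom (table names are placeholders):

	#include <stdio.h>
	#include <stdlib.h>

	static int init_table(const char *name, void **tbl)
	{
		*tbl = malloc(16);
		if (!*tbl) {
			fprintf(stderr, "failed to init %s\n", name);
			return -1;
		}
		return 0;
	}

	static void cleanup_table(void *tbl) { free(tbl); }

	static int init_all(void)
	{
		void *mtpt, *qpc, *cqc;
		int ret;

		ret = init_table("MTPT", &mtpt);
		if (ret)
			return ret;
		ret = init_table("QPC", &qpc);
		if (ret)
			goto err_mtpt;
		ret = init_table("CQC", &cqc);
		if (ret)
			goto err_qpc;
		return 0;

		/* Unwind strictly in reverse order of setup. */
	err_qpc:
		cleanup_table(qpc);
	err_mtpt:
		cleanup_table(mtpt);
		return ret;
	}

	int main(void) { return init_all() ? 1 : 0; }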
1049 mutex_destroy(&hr_dev->pgdir_mutex); in hns_roce_teardown_hca()
1053 * hns_roce_setup_hca - setup host channel adapter
1059 struct device *dev = hr_dev->dev; in hns_roce_setup_hca()
1062 spin_lock_init(&hr_dev->sm_lock); in hns_roce_setup_hca()
1064 INIT_LIST_HEAD(&hr_dev->qp_list); in hns_roce_setup_hca()
1065 spin_lock_init(&hr_dev->qp_list_lock); in hns_roce_setup_hca()
1067 INIT_LIST_HEAD(&hr_dev->pgdir_list); in hns_roce_setup_hca()
1068 mutex_init(&hr_dev->pgdir_mutex); in hns_roce_setup_hca()
1072 ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar); in hns_roce_setup_hca()
1086 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) in hns_roce_setup_hca()
1093 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) in hns_roce_setup_hca()
1099 ida_destroy(&hr_dev->uar_ida.ida); in hns_roce_setup_hca()
1100 mutex_destroy(&hr_dev->pgdir_mutex); in hns_roce_setup_hca()
1110 spin_lock_irqsave(&hr_cq->lock, flags); in check_and_get_armed_cq()
1111 if (cq->comp_handler) { in check_and_get_armed_cq()
1112 if (!hr_cq->is_armed) { in check_and_get_armed_cq()
1113 hr_cq->is_armed = 1; in check_and_get_armed_cq()
1114 list_add_tail(&hr_cq->node, cq_list); in check_and_get_armed_cq()
1117 spin_unlock_irqrestore(&hr_cq->lock, flags); in check_and_get_armed_cq()
1130 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); in hns_roce_handle_device_err()
1131 list_for_each_entry(hr_qp, &hr_dev->qp_list, node) { in hns_roce_handle_device_err()
1132 spin_lock_irqsave(&hr_qp->sq.lock, flags_qp); in hns_roce_handle_device_err()
1133 if (hr_qp->sq.tail != hr_qp->sq.head) in hns_roce_handle_device_err()
1134 check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq); in hns_roce_handle_device_err()
1135 spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp); in hns_roce_handle_device_err()
1137 spin_lock_irqsave(&hr_qp->rq.lock, flags_qp); in hns_roce_handle_device_err()
1138 if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head)) in hns_roce_handle_device_err()
1139 check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq); in hns_roce_handle_device_err()
1140 spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp); in hns_roce_handle_device_err()
1144 hns_roce_cq_completion(hr_dev, hr_cq->cqn); in hns_roce_handle_device_err()
1146 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); in hns_roce_handle_device_err()
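While scanning QPs with outstanding work, check_and_get_armed_cq() marks each CQ armed the first time it is seen, so a CQ shared by several QPs gets its completion handler invoked exactly once. A minimal sketch of that deduplication (locking omitted here; the kernel version tests is_armed under the CQ lock):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_CQS 2

	struct cq { int id; bool is_armed; };

	static void check_and_get_armed_cq(struct cq *list[], int *n,
					   struct cq *cq)
	{
		if (!cq->is_armed) {
			cq->is_armed = true;	/* first sighting: queue it */
			list[(*n)++] = cq;
		}
	}

	int main(void)
	{
		struct cq cqs[NUM_CQS] = { { 0, false }, { 1, false } };
		struct cq *armed[NUM_CQS];
		int n = 0;

		/* Two QPs share CQ 0; it must be completed only once. */
		check_and_get_armed_cq(armed, &n, &cqs[0]);
		check_and_get_armed_cq(armed, &n, &cqs[1]);
		check_and_get_armed_cq(armed, &n, &cqs[0]);

		for (int i = 0; i < n; i++)
			printf("complete CQ %d\n", armed[i]->id);
		return 0;
	}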
1151 hr_dev->dfx_cnt = kvcalloc(HNS_ROCE_DFX_CNT_TOTAL, sizeof(atomic64_t), in hns_roce_alloc_dfx_cnt()
1153 if (!hr_dev->dfx_cnt) in hns_roce_alloc_dfx_cnt()
1154 return -ENOMEM; in hns_roce_alloc_dfx_cnt()
1161 kvfree(hr_dev->dfx_cnt); in hns_roce_dealloc_dfx_cnt()
1166 struct device *dev = hr_dev->dev; in hns_roce_init()
1169 hr_dev->is_reset = false; in hns_roce_init()
1175 if (hr_dev->hw->cmq_init) { in hns_roce_init()
1176 ret = hr_dev->hw->cmq_init(hr_dev); in hns_roce_init()
1183 ret = hr_dev->hw->hw_profile(hr_dev); in hns_roce_init()
1196 ret = hr_dev->hw->init_eq(hr_dev); in hns_roce_init()
1202 if (hr_dev->cmd_mod) { in hns_roce_init()
1221 if (hr_dev->hw->hw_init) { in hns_roce_init()
1222 ret = hr_dev->hw->hw_init(hr_dev); in hns_roce_init()
1238 if (hr_dev->hw->hw_exit) in hns_roce_init()
1239 hr_dev->hw->hw_exit(hr_dev); in hns_roce_init()
1248 if (hr_dev->cmd_mod) in hns_roce_init()
1250 hr_dev->hw->cleanup_eq(hr_dev); in hns_roce_init()
1256 if (hr_dev->hw->cmq_exit) in hns_roce_init()
1257 hr_dev->hw->cmq_exit(hr_dev); in hns_roce_init()
1270 if (hr_dev->hw->hw_exit) in hns_roce_exit()
1271 hr_dev->hw->hw_exit(hr_dev); in hns_roce_exit()
1275 if (hr_dev->cmd_mod) in hns_roce_exit()
1278 hr_dev->hw->cleanup_eq(hr_dev); in hns_roce_exit()
1280 if (hr_dev->hw->cmq_exit) in hns_roce_exit()
1281 hr_dev->hw->cmq_exit(hr_dev); in hns_roce_exit()