/linux/drivers/infiniband/hw/hns/ (cross-reference hits for hr_dev)
hns_roce_main.c
  45  static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,  in hns_roce_set_mac() argument
  51  if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)  in hns_roce_set_mac()
  54  if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))  in hns_roce_set_mac()
  58  hr_dev->dev_addr[port][i] = addr[i];  in hns_roce_set_mac()
  60  phy_port = hr_dev->iboe.phy_port[port];  in hns_roce_set_mac()
  61  return hr_dev->hw->set_mac(hr_dev, phy_port, addr);  in hns_roce_set_mac()
  66  struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);  in hns_roce_add_gid() local
  70  if (port >= hr_dev->caps.num_ports)  in hns_roce_add_gid()
  73  ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);  in hns_roce_add_gid()
  80  struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);  in hns_roce_del_gid() local
  [all …]
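The hits above capture the driver's core dispatch idiom: common verbs code validates against hr_dev->caps, caches state such as dev_addr[], and then calls through the hr_dev->hw function-pointer table so each hardware generation can plug in its own set_mac()/set_gid(). Below is a minimal user-space sketch of that idiom; the names (struct dev, dev_ops, hip_set_mac) are illustrative stand-ins, not the driver's own types.

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct dev;                     /* forward declaration for the ops table */

    /* Hardware-specific entry points, in the spirit of hr_dev->hw->set_mac(). */
    struct dev_ops {
        int (*set_mac)(struct dev *d, unsigned port, const unsigned char *addr);
    };

    struct dev {
        const struct dev_ops *hw;            /* like hr_dev->hw */
        unsigned num_ports;                  /* like hr_dev->caps.num_ports */
        unsigned char dev_addr[2][ETH_ALEN]; /* cached per-port MAC */
    };

    static int hip_set_mac(struct dev *d, unsigned port, const unsigned char *addr)
    {
        (void)d; (void)addr;
        printf("hw backend: programming MAC on phy port %u\n", port);
        return 0;
    }

    /* Generic layer: validate, skip no-op updates, cache, then dispatch. */
    static int set_mac(struct dev *d, unsigned port, const unsigned char *addr)
    {
        if (port >= d->num_ports)
            return -1;
        if (!memcmp(d->dev_addr[port], addr, ETH_ALEN))
            return 0;                        /* unchanged, nothing to do */
        memcpy(d->dev_addr[port], addr, ETH_ALEN);
        return d->hw->set_mac(d, port, addr);
    }

    int main(void)
    {
        static const struct dev_ops ops = { .set_mac = hip_set_mac };
        struct dev d = { .hw = &ops, .num_ports = 1 };
        unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

        return set_mac(&d, 0, mac);
    }

The same ops table carries the mailbox and query entry points listed later in hns_roce_device.h, which is why nearly every function in this directory takes hr_dev as its first argument.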
hns_roce_cmd.c
  41  static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,  in hns_roce_cmd_mbox_post_hw() argument
  46  ret = hr_dev->hw->post_mbox(hr_dev, mbox_msg);  in hns_roce_cmd_mbox_post_hw()
  50  atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POSTED_CNT]);  in hns_roce_cmd_mbox_post_hw()
  56  static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,  in __hns_roce_cmd_mbox_poll() argument
  61  ret = hns_roce_cmd_mbox_post_hw(hr_dev, mbox_msg);  in __hns_roce_cmd_mbox_poll()
  63  dev_err_ratelimited(hr_dev->dev,  in __hns_roce_cmd_mbox_poll()
  69  ret = hr_dev->hw->poll_mbox_done(hr_dev);  in __hns_roce_cmd_mbox_poll()
  73  atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MBX_POLLED_CNT]);  in __hns_roce_cmd_mbox_poll()
  78  static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev,  in hns_roce_cmd_mbox_poll() argument
  83  down(&hr_dev->cmd.poll_sem);  in hns_roce_cmd_mbox_poll()
  [all …]
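These fragments outline the poll-mode mailbox path: hns_roce_cmd_mbox_poll() takes cmd.poll_sem so a single caller owns the mailbox, posts the message through hw->post_mbox(), busy-waits in hw->poll_mbox_done(), and bumps the dfx_cnt debug counters along the way. A rough user-space model of that post-then-poll flow, with the hardware calls stubbed out and POSIX primitives standing in for the kernel's semaphore and atomic64:

    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static sem_t poll_sem;                      /* like hr_dev->cmd.poll_sem */
    static atomic_ulong mbx_posted, mbx_polled; /* like the dfx_cnt[] slots */

    static int post_mbox(int msg)   { (void)msg; return 0; } /* stub: ring hw */
    static int poll_mbox_done(void) { return 0; }            /* stub: spin on status */

    static int cmd_mbox_poll(int msg)
    {
        int ret;

        sem_wait(&poll_sem);            /* one poll-mode caller at a time */
        ret = post_mbox(msg);
        if (ret) {
            fprintf(stderr, "failed to post mailbox, ret = %d.\n", ret);
            goto out;
        }
        atomic_fetch_add(&mbx_posted, 1);

        ret = poll_mbox_done();         /* busy-wait for the completion */
        if (!ret)
            atomic_fetch_add(&mbx_polled, 1);
    out:
        sem_post(&poll_sem);
        return ret;
    }

    int main(void)
    {
        sem_init(&poll_sem, 0, 1);
        return cmd_mbox_poll(42);
    }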
hns_roce_cq.c
  43  struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);  in hns_roce_put_cq_bankid_for_uctx() local
  44  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in hns_roce_put_cq_bankid_for_uctx()
  46  if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)  in hns_roce_put_cq_bankid_for_uctx()
  56  struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);  in hns_roce_get_cq_bankid_for_uctx() local
  57  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in hns_roce_get_cq_bankid_for_uctx()
  62  if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)  in hns_roce_get_cq_bankid_for_uctx()
  96  static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,  in select_cq_bankid() argument
  103  if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)  in select_cq_bankid()
  109  static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,  in alloc_cqn() argument
  112  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in alloc_cqn()
  [all …]
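The get/put_cq_bankid_for_uctx() helpers only apply on HIP09 and newer, where CQNs are spread across hardware banks and select_cq_bankid() picks one per CQ. The selection policy is not visible in these hits; the sketch below assumes a simple least-loaded choice purely to illustrate the bank bookkeeping.

    #include <stdio.h>

    #define CQ_BANK_NUM 4

    static unsigned bank_inuse[CQ_BANK_NUM];  /* CQ count per bank */

    /* Pick the least-populated bank; an assumed policy, for illustration. */
    static unsigned char select_bankid(void)
    {
        unsigned char i, best = 0;

        for (i = 1; i < CQ_BANK_NUM; i++)
            if (bank_inuse[i] < bank_inuse[best])
                best = i;
        bank_inuse[best]++;
        return best;
    }

    static void put_bankid(unsigned char id)
    {
        bank_inuse[id]--;
    }

    int main(void)
    {
        unsigned char a = select_bankid();
        unsigned char b = select_bankid();

        printf("CQ banks picked: %u, %u\n", a, b);
        put_bankid(a);
        put_bankid(b);
        return 0;
    }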
hns_roce_qp.c
  41  static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,  in hns_roce_qp_lookup() argument
  44  struct device *dev = hr_dev->dev;  in hns_roce_qp_lookup()
  48  xa_lock_irqsave(&hr_dev->qp_table_xa, flags);  in hns_roce_qp_lookup()
  49  qp = __hns_roce_qp_lookup(hr_dev, qpn);  in hns_roce_qp_lookup()
  52  xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);  in hns_roce_qp_lookup()
  66  struct device *dev = flush_work->hr_dev->dev;  in flush_work_handle()
  89  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)  in init_flush_work() argument
  102  queue_work(hr_dev->irq_workq, &flush_work->work);  in init_flush_work()
  121  void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)  in hns_roce_qp_event() argument
  125  qp = hns_roce_qp_lookup(hr_dev, qpn);  in hns_roce_qp_event()
  [all …]
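hns_roce_qp_lookup() resolves a QPN to a QP under xa_lock_irqsave() so the asynchronous event path (hns_roce_qp_event(), plus flush work queued to irq_workq) can safely use the object after the lock is dropped. A pthread-based model of the same look-up-and-reference pattern; the fixed table and manual refcount here are simplified stand-ins for the kernel's xarray and reference helpers:

    #include <pthread.h>
    #include <stdio.h>

    #define QP_TABLE_SIZE 64

    struct qp {
        unsigned qpn;
        int refcount;        /* guards against teardown races */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qp *qp_table[QP_TABLE_SIZE];   /* models qp_table_xa */

    /* Look up a QP and take a reference while the table lock is held,
     * so an async event handler can use it after dropping the lock. */
    static struct qp *qp_lookup(unsigned qpn)
    {
        struct qp *qp;

        pthread_mutex_lock(&table_lock);
        qp = qp_table[qpn % QP_TABLE_SIZE];
        if (qp && qp->qpn == qpn)
            qp->refcount++;
        else
            qp = NULL;
        pthread_mutex_unlock(&table_lock);

        if (!qp)
            fprintf(stderr, "async event for bogus QP %08x\n", qpn);
        return qp;
    }

    int main(void)
    {
        struct qp qp0 = { .qpn = 7, .refcount = 1 };

        qp_table[7 % QP_TABLE_SIZE] = &qp0;
        return qp_lookup(7) ? 0 : 1;
    }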
hns_roce_srq.c
  12  void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)  in hns_roce_srq_event() argument
  14  struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;  in hns_roce_srq_event()
  18  srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));  in hns_roce_srq_event()
  24  dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);  in hns_roce_srq_event()
  37  struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);  in hns_roce_ib_srq_event() local
  52  dev_err(hr_dev->dev,  in hns_roce_ib_srq_event()
  62  static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)  in alloc_srqn() argument
  64  struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;  in alloc_srqn()
  70  ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);  in alloc_srqn()
  79  static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)  in free_srqn() argument
  [all …]
hns_roce_pd.c
  35  void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)  in hns_roce_init_pd_table() argument
  37  struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;  in hns_roce_init_pd_table()
  40  pd_ida->max = hr_dev->caps.num_pds - 1;  in hns_roce_init_pd_table()
  41  pd_ida->min = hr_dev->caps.reserved_pds;  in hns_roce_init_pd_table()
  47  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in hns_roce_alloc_pd() local
  48  struct hns_roce_ida *pd_ida = &hr_dev->pd_ida;  in hns_roce_alloc_pd()
  77  struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);  in hns_roce_dealloc_pd() local
  79  ida_free(&hr_dev->pd_ida.ida, (int)to_hr_pd(pd)->pdn);  in hns_roce_dealloc_pd()
  84  int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)  in hns_roce_uar_alloc() argument
  86  struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;  in hns_roce_uar_alloc()
  [all …]
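The PD table is just an ID allocator: init sets min to caps.reserved_pds and max to caps.num_pds - 1, allocation hands out the lowest free ID in that window, and dealloc returns it with ida_free(). A toy allocator with the same reserved-range behavior; the bitmap is a stand-in for the kernel IDA:

    #include <stdio.h>

    #define NUM_PDS 32

    struct ida {
        unsigned min, max;            /* like pd_ida->min / pd_ida->max */
        unsigned char used[NUM_PDS];  /* toy bitmap */
    };

    /* Allocate the lowest free id in [min, max]; firmware-reserved PDs
     * below 'min' are never handed out (reserved_pds in the driver). */
    static int ida_alloc(struct ida *ida)
    {
        unsigned id;

        for (id = ida->min; id <= ida->max; id++)
            if (!ida->used[id]) {
                ida->used[id] = 1;
                return (int)id;
            }
        return -1;
    }

    static void ida_free_id(struct ida *ida, int id)
    {
        ida->used[id] = 0;
    }

    int main(void)
    {
        struct ida pd_ida = { .min = 2, .max = NUM_PDS - 1 };
        int pdn = ida_alloc(&pd_ida);

        printf("allocated pdn %d\n", pdn);   /* prints 2, not 0 */
        ida_free_id(&pd_ida, pdn);
        return 0;
    }

In the kernel, ida_alloc_range() provides exactly this min/max contract, which is why the driver only has to record the two bounds at init time.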
hns_roce_hw_v2.c
  277  struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in check_inl_data_len() local
  281  ibdev_err(&hr_dev->ib_dev,  in check_inl_data_len()
  294  struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);  in set_rc_inl() local
  296  struct ib_device *ibdev = &hr_dev->ib_dev;  in set_rc_inl()
  376  static int check_send_valid(struct hns_roce_dev *hr_dev,  in check_send_valid() argument
  383  else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))  in check_send_valid()
  437  struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);  in fill_ud_av() local
  450  if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)  in fill_ud_av()
  516  static int set_rc_opcode(struct hns_roce_dev *hr_dev,  in set_rc_opcode() argument
  541  if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)  in set_rc_opcode()
  [all …]
hns_roce_bond.c
  80  struct hns_roce_dev *hr_dev)  in hns_roce_set_bond_netdev() argument
  101  active_dev = get_hr_netdev(hr_dev, 0);  in hns_roce_set_bond_netdev()
  103  old_dev = ib_device_get_netdev(&hr_dev->ib_dev, 1);  in hns_roce_set_bond_netdev()
  107  ret = ib_device_set_netdev(&hr_dev->ib_dev, active_dev, 1);  in hns_roce_set_bond_netdev()
  109  dev_err(hr_dev->dev, "failed to set netdev for bond.\n");  in hns_roce_set_bond_netdev()
  115  roce_del_all_netdev_gids(&hr_dev->ib_dev, 1, old_dev);  in hns_roce_set_bond_netdev()
  116  rdma_roce_rescan_port(&hr_dev->ib_dev, 1);  in hns_roce_set_bond_netdev()
  123  bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev)  in hns_roce_bond_is_active() argument
  125  struct net_device *net_dev = get_hr_netdev(hr_dev, 0);  in hns_roce_bond_is_active()
  127  u8 bus_num = get_hr_bus_num(hr_dev);  in hns_roce_bond_is_active()
  [all …]
hns_roce_alloc.c
  38  void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)  in hns_roce_buf_free() argument
  50  dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,  in hns_roce_buf_free()
  66  struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,  in hns_roce_buf_alloc() argument
  105  trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,  in hns_roce_buf_alloc()
  118  dma_free_coherent(hr_dev->dev, trunk_size,  in hns_roce_buf_alloc()
  132  int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,  in hns_roce_get_kmem_bufs() argument
  141  dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",  in hns_roce_get_kmem_bufs()
  173  void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)  in hns_roce_cleanup_bitmap() argument
  175  if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)  in hns_roce_cleanup_bitmap()
  176  ida_destroy(&hr_dev->xrcd_ida.ida);  in hns_roce_cleanup_bitmap()
  [all …]
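hns_roce_buf_alloc() builds a large buffer from equal-sized DMA trunks, and on a mid-loop failure frees exactly the trunks already allocated before reporting the error. The sketch below mirrors that allocate-and-unwind shape, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent():

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        unsigned ntrunks;
        size_t trunk_size;
        void **trunks;
    };

    static void buf_free(struct buf *buf)
    {
        unsigned i;

        for (i = 0; i < buf->ntrunks; i++)
            free(buf->trunks[i]);             /* dma_free_coherent() in-kernel */
        free(buf->trunks);
        free(buf);
    }

    static struct buf *buf_alloc(size_t size, size_t trunk_size)
    {
        struct buf *buf = calloc(1, sizeof(*buf));
        unsigned i;

        if (!buf)
            return NULL;
        buf->trunk_size = trunk_size;
        buf->ntrunks = (size + trunk_size - 1) / trunk_size;
        buf->trunks = calloc(buf->ntrunks, sizeof(void *));
        if (!buf->trunks)
            goto err_buf;

        for (i = 0; i < buf->ntrunks; i++) {
            buf->trunks[i] = malloc(trunk_size); /* dma_alloc_coherent() */
            if (!buf->trunks[i])
                goto err_trunk;
        }
        return buf;

    err_trunk:
        while (i--)
            free(buf->trunks[i]);             /* unwind partial progress */
        free(buf->trunks);
    err_buf:
        free(buf);
        return NULL;
    }

    int main(void)
    {
        struct buf *b = buf_alloc(1 << 16, 1 << 12);

        if (!b)
            return 1;
        printf("allocated %u trunks of %zu bytes\n", b->ntrunks, b->trunk_size);
        buf_free(b);
        return 0;
    }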
hns_roce_restrack.c
  44  struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);  in hns_roce_fill_res_cq_entry_raw() local
  49  if (!hr_dev->hw->query_cqc)  in hns_roce_fill_res_cq_entry_raw()
  52  ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);  in hns_roce_fill_res_cq_entry_raw()
  97  struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);  in hns_roce_fill_res_qp_entry_raw() local
  106  if (!hr_dev->hw->query_qpc)  in hns_roce_fill_res_qp_entry_raw()
  109  ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);  in hns_roce_fill_res_qp_entry_raw()
  116  if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||  in hns_roce_fill_res_qp_entry_raw()
  117  !hr_dev->hw->query_sccc)  in hns_roce_fill_res_qp_entry_raw()
  126  ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);  in hns_roce_fill_res_qp_entry_raw()
  128  ibdev_warn_ratelimited(&hr_dev->ib_dev,  in hns_roce_fill_res_qp_entry_raw()
  [all …]
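The restrack fillers guard twice before dumping raw context: the per-generation query op must exist (query_cqc/query_qpc/query_sccc), and for the SCC context the QP_FLOW_CTRL capability bit must also be advertised. A compact model of that double guard; the flag value and names here are illustrative, not the driver's:

    #include <errno.h>
    #include <stdio.h>

    #define CAP_FLAG_QP_FLOW_CTRL (1u << 0)   /* illustrative flag bit */

    struct dev {
        unsigned caps_flags;
        int (*query_sccc)(struct dev *d, unsigned sccn, void *ctx);
    };

    /* Dump flow-control context only when both the capability bit and
     * the per-generation query op are present, as the hits above do. */
    static int fill_res_raw(struct dev *d, unsigned sccn, void *ctx)
    {
        if (!(d->caps_flags & CAP_FLAG_QP_FLOW_CTRL) || !d->query_sccc)
            return -EOPNOTSUPP;

        return d->query_sccc(d, sccn, ctx);
    }

    int main(void)
    {
        struct dev d = { 0 };    /* neither capability nor op present */

        printf("ret = %d\n", fill_res_raw(&d, 0, NULL));
        return 0;
    }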
hns_roce_device.h
  589  struct hns_roce_dev *hr_dev;  member
  697  struct hns_roce_dev *hr_dev;  member
  913  int (*cmq_init)(struct hns_roce_dev *hr_dev);
  914  void (*cmq_exit)(struct hns_roce_dev *hr_dev);
  915  int (*hw_profile)(struct hns_roce_dev *hr_dev);
  916  int (*hw_init)(struct hns_roce_dev *hr_dev);
  917  void (*hw_exit)(struct hns_roce_dev *hr_dev);
  918  int (*post_mbox)(struct hns_roce_dev *hr_dev,
  920  int (*poll_mbox_done)(struct hns_roce_dev *hr_dev);
  921  bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
  [all …]
hns_roce_hem.h
  88  void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
  89  int hns_roce_table_get(struct hns_roce_dev *hr_dev,
  91  void hns_roce_table_put(struct hns_roce_dev *hr_dev,
  93  void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
  96  int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
  99  void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
  101  void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
  102  int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
  105  bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
  110  int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
  [all …]
hns_roce_db.c
  124  int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,  in hns_roce_alloc_db() argument
  130  mutex_lock(&hr_dev->pgdir_mutex);  in hns_roce_alloc_db()
  132  list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)  in hns_roce_alloc_db()
  136  pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);  in hns_roce_alloc_db()
  142  list_add(&pgdir->list, &hr_dev->pgdir_list);  in hns_roce_alloc_db()
  148  mutex_unlock(&hr_dev->pgdir_mutex);  in hns_roce_alloc_db()
  153  void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)  in hns_roce_free_db() argument
  158  mutex_lock(&hr_dev->pgdir_mutex);  in hns_roce_free_db()
  173  dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,  in hns_roce_free_db()
  179  mutex_unlock(&hr_dev->pgdir_mutex);  in hns_roce_free_db()
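Doorbell records are sub-allocated: hns_roce_alloc_db() walks pgdir_list under pgdir_mutex looking for a page directory with a free slot, allocates a fresh one only when every existing page is full, and hns_roce_free_db() releases the DMA page once its last record is gone. A single-threaded sketch of that shared-page sub-allocator (locking and the free path omitted for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    #define DB_PER_PAGE 128

    struct pgdir {
        struct pgdir *next;                   /* models hr_dev->pgdir_list */
        unsigned char slot_used[DB_PER_PAGE];
        unsigned nused;
    };

    static struct pgdir *pgdir_list;

    /* Prefer a page directory that still has room; allocate a fresh one
     * only when every existing page is full. */
    static int alloc_db(struct pgdir **out)
    {
        struct pgdir *p;
        unsigned i;

        for (p = pgdir_list; p; p = p->next)
            if (p->nused < DB_PER_PAGE)
                goto found;

        p = calloc(1, sizeof(*p));            /* a DMA page in the driver */
        if (!p)
            return -1;
        p->next = pgdir_list;
        pgdir_list = p;
    found:
        for (i = 0; i < DB_PER_PAGE; i++)
            if (!p->slot_used[i]) {
                p->slot_used[i] = 1;
                p->nused++;
                *out = p;
                return (int)i;
            }
        return -1;
    }

    int main(void)
    {
        struct pgdir *pa, *pb;
        int a = alloc_db(&pa);
        int b = alloc_db(&pb);

        printf("slots %d and %d, same page: %s\n", a, b,
               pa == pb ? "yes" : "no");
        return a < 0 || b < 0;
    }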
hns_roce_ah.c
  57  struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);  in hns_roce_create_ah() local
  65  if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) {  in hns_roce_create_ah()
  81  ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);  in hns_roce_create_ah()
  94  if (!check_sl_valid(hr_dev, ah->av.sl)) {  in hns_roce_create_ah()
  103  if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {  in hns_roce_create_ah()
  122  atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AH_CREATE_ERR_CNT]);  in hns_roce_create_ah()
hns_roce_trace.h
  176  TP_PROTO(struct hns_roce_dev *hr_dev,
  178  TP_ARGS(hr_dev, desc),
  180  TP_STRUCT__entry(__string(dev_name, dev_name(hr_dev->dev))
  202  TP_PROTO(struct hns_roce_dev *hr_dev,
  204  TP_ARGS(hr_dev, desc)),
  206  TP_PROTO(struct hns_roce_dev *hr_dev,
  208  TP_ARGS(hr_dev, desc));
hns_roce_cmd.h
  145  int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
  149  hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev);
  150  void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
hns_roce_bond.h
  87  int hns_roce_alloc_bond_grp(struct hns_roce_dev *hr_dev);
  90  bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev);
  91  int hns_roce_bond_init(struct hns_roce_dev *hr_dev);
hns_roce_debugfs.h
  30  void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev);
  31  void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev);
hns_roce_hw_v2.h
  1490  static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],  in hns_roce_write64() argument
  1493  struct hns_roce_v2_priv *priv = hr_dev->priv;  in hns_roce_write64()
  1497  if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))  in hns_roce_write64()
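hns_roce_write64() shows the doorbell guard: the two-word value is only written when doorbells are not disabled (hr_dev->dis_db) and the hardware is not mid-reset (ops->get_hw_reset_stat()). A user-space model of that guarded 64-bit ring, with an in-memory variable standing in for the mapped doorbell register:

    #include <stdint.h>
    #include <stdio.h>

    struct dev {
        int dis_db;               /* doorbells disabled, e.g. reset underway */
        volatile uint64_t db_reg; /* stand-in for the mapped doorbell */
    };

    /* Issue a 64-bit doorbell built from two little-endian words, but
     * only when the device is not resetting; mirrors the dis_db and
     * reset-state guard in hns_roce_write64(). */
    static void write64(struct dev *d, const uint32_t val[2])
    {
        if (d->dis_db)
            return;               /* hardware is resetting, drop the ring */

        d->db_reg = (uint64_t)val[1] << 32 | val[0];
    }

    int main(void)
    {
        struct dev d = { 0 };
        uint32_t db[2] = { 0x1234, 0x1 };

        write64(&d, db);
        printf("doorbell: 0x%llx\n", (unsigned long long)d.db_reg);
        return 0;
    }

Dropping the write rather than blocking is the safe choice here: a missed doorbell after reset is recovered by the driver's reset teardown and re-post paths, whereas a write to hardware in reset could wedge the device.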