Home
last modified time | relevance | path

Searched refs:qp_table (Results 1 – 22 of 22) sorted by relevance

/linux/drivers/gpu/drm/amd/display/dc/dml/dsc/
qp_tables.h 28 static const qp_table qp_table_422_10bpc_min = {
61 static const qp_table qp_table_444_8bpc_max = {
102 static const qp_table qp_table_420_12bpc_max = {
135 static const qp_table qp_table_444_10bpc_min = {
188 static const qp_table qp_table_420_8bpc_max = {
209 static const qp_table qp_table_444_8bpc_min = {
250 static const qp_table qp_table_444_12bpc_min = {
315 static const qp_table qp_table_420_12bpc_min = {
348 static const qp_table qp_table_422_12bpc_min = {
389 static const qp_table qp_table_422_12bpc_max = {
[all …]
rc_calc_fpu.h 79 typedef struct qp_entry qp_table[]; typedef
/linux/drivers/net/ethernet/mellanox/mlx4/
qp.c 58 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_event() local
61 spin_lock(&qp_table->lock); in mlx4_qp_event()
67 spin_unlock(&qp_table->lock); in mlx4_qp_event()
231 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_reserve_range() local
244 *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, in __mlx4_qp_reserve_range()
283 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_release_range() local
287 mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt); in __mlx4_qp_release_range()
316 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_alloc_icm() local
319 err = mlx4_table_get(dev, &qp_table->qp_table, qpn); in __mlx4_qp_alloc_icm()
323 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); in __mlx4_qp_alloc_icm()
[all …]
profile.c 189 for (priv->qp_table.rdmarc_shift = 0; in mlx4_make_profile()
190 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; in mlx4_make_profile()
191 ++priv->qp_table.rdmarc_shift) in mlx4_make_profile()
193 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; in mlx4_make_profile()
194 priv->qp_table.rdmarc_base = (u32) profile[i].start; in mlx4_make_profile()
196 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; in mlx4_make_profile()
mlx4.h 725 struct mlx4_icm_table qp_table; member
906 struct mlx4_qp_table qp_table; member
/linux/drivers/infiniband/hw/hns/
hns_roce_qp.c 252 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn() local
260 mutex_lock(&qp_table->bank_mutex); in alloc_qpn()
261 bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank); in alloc_qpn()
263 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, in alloc_qpn()
268 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
272 qp_table->bank[bankid].inuse++; in alloc_qpn()
273 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
327 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc() local
335 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
342 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
[all …]
hns_roce_main.c 906 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, in hns_roce_init_hem()
914 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, in hns_roce_init_hem()
926 &hr_dev->qp_table.trrl_table, in hns_roce_init_hem()
960 &hr_dev->qp_table.sccc_table, in hns_roce_init_hem()
1021 &hr_dev->qp_table.sccc_table); in hns_roce_init_hem()
1032 &hr_dev->qp_table.trrl_table); in hns_roce_init_hem()
1035 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_init_hem()
1038 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_init_hem()
hns_roce_device.h 487 struct hns_roce_hem_table qp_table; member
1000 struct hns_roce_qp_table qp_table; member
hns_roce_hw_v2.c 2702 xa_lock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
2704 xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) { in free_dip_entry()
2705 __xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx); in free_dip_entry()
2709 xa_unlock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
4646 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, in modify_qp_init_to_rtr()
4654 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, in modify_qp_init_to_rtr()
4853 struct xarray *dip_xa = &hr_dev->qp_table.dip_xa; in get_dip_ctx_idx()
5764 xa_lock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5770 xa_unlock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5812 mutex_lock(&hr_dev->qp_table.scc_mutex); in hns_roce_v2_qp_flow_control_init()
[all …]
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c 198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
244 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
248 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
265 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
268 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
770 cpu_to_be32(dev->qp_table.rdb_base + in __mthca_modify_qp()
[all …]
mthca_main.c 443 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
448 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
454 mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, in mthca_init_icm()
459 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
465 mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, in mthca_init_icm()
468 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
470 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
530 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_init_icm()
533 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_init_icm()
536 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_init_icm()
[all …]
mthca_profile.c 204 for (dev->qp_table.rdb_shift = 0; in mthca_make_profile()
205 request->num_qp << dev->qp_table.rdb_shift < profile[i].num; in mthca_make_profile()
206 ++dev->qp_table.rdb_shift) in mthca_make_profile()
208 dev->qp_table.rdb_base = (u32) profile[i].start; in mthca_make_profile()
mthca_dev.h 259 struct mthca_icm_table *qp_table; member
344 struct mthca_qp_table qp_table; member
mthca_cq.c 520 *cur_qp = mthca_array_get(&dev->qp_table.qp, in mthca_poll_one()
/linux/drivers/infiniband/hw/mlx5/
qpc.c 100 xa_lock_irqsave(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
101 dct = xa_load(&dev->qp_table.dct_xa, qpn); in dct_event_notifier()
104 xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
112 container_of(nb, struct mlx5_ib_dev, qp_table.nb); in rsc_event_notifier()
137 common = mlx5_get_rsc(&dev->qp_table, rsn); in rsc_event_notifier()
164 struct mlx5_qp_table *table = &dev->qp_table; in create_resource_common()
187 struct mlx5_qp_table *table = &dev->qp_table; in modify_resource_common_state()
198 struct mlx5_qp_table *table = &dev->qp_table; in destroy_resource_common()
236 err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL)); in mlx5_core_create_dct()
294 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_core_destroy_dct()
[all …]
cq.c 498 mqp = radix_tree_lookup(&dev->qp_table.tree, qpn); in mlx5_poll_one()
/linux/include/rdma/
rdmavt_qp.h 499 struct rvt_qp __rcu **qp_table; member
712 for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp; in rvt_lookup_qpn()
/linux/drivers/infiniband/hw/irdma/
main.h 325 struct irdma_qp **qp_table; member
hw.c 249 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
2026 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
2028 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
utils.c 808 iwdev->rf->qp_table[qp_num] = NULL; in irdma_qp_rem_ref()
855 return &iwdev->rf->qp_table[qpn]->ibqp; in irdma_get_qp()
cm.c 3452 if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) { in irdma_cm_disconn()
verbs.c 1107 rf->qp_table[qp_num] = iwqp; in irdma_create_qp()