Home
last modified time | relevance | path

Searched refs:cq_table (Results 1 – 17 of 17) sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx4/
H A Dcq.c109 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion()
128 struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table; in mlx4_cq_event() local
132 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_event()
220 struct mlx4_cq_table *cq_table = &priv->cq_table; in __mlx4_cq_alloc_icm() local
223 *cqn = mlx4_bitmap_alloc(&cq_table->bitmap); in __mlx4_cq_alloc_icm()
227 err = mlx4_table_get(dev, &cq_table->table, *cqn); in __mlx4_cq_alloc_icm()
231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); in __mlx4_cq_alloc_icm()
237 mlx4_table_put(dev, &cq_table->table, *cqn); in __mlx4_cq_alloc_icm()
240 mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR); in __mlx4_cq_alloc_icm()
267 struct mlx4_cq_table *cq_table = &priv->cq_table; in __mlx4_cq_free_icm() local
[all …]
H A Dmain.c1626 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, in mlx4_init_cmpt_table()
1647 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_cmpt_table()
1781 err = mlx4_init_icm_table(dev, &priv->cq_table.table, in mlx4_init_icm()
1825 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_init_icm()
1850 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_init_icm()
1869 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); in mlx4_free_icms()
1878 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); in mlx4_free_icms()
H A Dmlx4.h903 struct mlx4_cq_table cq_table; member
/linux/drivers/infiniband/hw/mthca/
H A Dmthca_cq.c221 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
239 spin_lock(&dev->cq_table.lock); in mthca_cq_event()
241 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_event()
245 spin_unlock(&dev->cq_table.lock); in mthca_cq_event()
258 spin_lock(&dev->cq_table.lock); in mthca_cq_event()
261 spin_unlock(&dev->cq_table.lock); in mthca_cq_event()
779 cq->cqn = mthca_alloc(&dev->cq_table.alloc); in mthca_init_cq()
784 err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); in mthca_init_cq()
850 spin_lock_irq(&dev->cq_table.lock); in mthca_init_cq()
851 err = mthca_array_set(&dev->cq_table.cq, in mthca_init_cq()
[all …]
H A Dmthca_main.c476 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, in mthca_init_icm()
481 if (!mdev->cq_table.table) { in mthca_init_icm()
527 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_init_icm()
562 mthca_free_icm_table(mdev, mdev->cq_table.table); in mthca_free_icms()
H A Dmthca_dev.h342 struct mthca_cq_table cq_table; member
/linux/drivers/infiniband/hw/mana/
H A Dcq.c146 WARN_ON(gc->cq_table[cq->queue.id]); in mana_ib_install_cq_cb()
158 gc->cq_table[cq->queue.id] = gdma_cq; in mana_ib_install_cq_cb()
173 kfree(gc->cq_table[cq->queue.id]); in mana_ib_remove_cq_cb()
174 gc->cq_table[cq->queue.id] = NULL; in mana_ib_remove_cq_cb()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
H A Deq.c94 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_cq_get()
258 struct mlx5_cq_table *cq_table = &eq->cq_table; in create_map_eq() local
271 memset(cq_table, 0, sizeof(*cq_table)); in create_map_eq()
272 spin_lock_init(&cq_table->lock); in create_map_eq()
273 INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); in create_map_eq()
398 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_add_cq()
410 struct mlx5_cq_table *table = &eq->cq_table; in mlx5_eq_del_cq()
/linux/drivers/net/ethernet/microsoft/mana/
H A Dhw_channel.c692 gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *)); in mana_hwc_establish_channel()
693 if (!gc->cq_table) in mana_hwc_establish_channel()
696 gc->cq_table[cq->id] = cq; in mana_hwc_establish_channel()
842 vfree(gc->cq_table); in mana_hwc_destroy_channel()
843 gc->cq_table = NULL; in mana_hwc_destroy_channel()
H A Dgdma_main.c584 cq = gc->cq_table[cq_id]; in mana_gd_process_eqe()
911 if (!gc->cq_table[id]) in mana_gd_destroy_cq()
914 gc->cq_table[id] = NULL; in mana_gd_destroy_cq()
H A Dmana_en.c2434 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_txq()
2735 gc->cq_table[cq->gdma_id] = cq->gdma_cq; in mana_create_rxq()
/linux/drivers/infiniband/hw/hns/
H A Dhns_roce_main.c943 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table, in hns_roce_init_hem()
1032 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_init_hem()
H A Dhns_roce_hem.c893 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_cleanup_hem()
/linux/include/net/mana/
H A Dgdma.h406 struct gdma_queue **cq_table;
400 struct gdma_queue **cq_table; member
/linux/drivers/infiniband/hw/irdma/
H A Dhw.c113 struct irdma_cq *icq = READ_ONCE(rf->cq_table[cq_idx]); in irdma_process_normal_ceqe()
417 iwcq = rf->cq_table[info->qp_cq_id]; in irdma_process_aeq()
2095 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
H A Dutils.c834 WRITE_ONCE(iwdev->rf->cq_table[iwcq->cq_num], NULL); in irdma_cq_rem_ref()
H A Dverbs.c2676 smp_store_release(&rf->cq_table[cq_num], iwcq); in irdma_create_cq()