Lines matching +full:mailbox in drivers/net/ethernet/mellanox/mlx4/srq.c

 *      - Redistributions of source code must retain the above
 *        copyright notice, ...
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, ...
/* mlx4_srq_event() */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;
	...
	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
	...
	refcount_inc(&srq->refcount);	/* hold the SRQ across the handler */
	...
	srq->event(srq, event_type);

	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);	/* last reference: unblock mlx4_srq_free() */
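
/*
 * Illustrative sketch (not from this file): the kind of consumer-side
 * callback that srq->event() above dispatches to. The handler name and
 * message are hypothetical; the signature follows the event hook as it
 * appears in include/linux/mlx4/device.h.
 */
#include <linux/mlx4/device.h>

static void my_srq_event(struct mlx4_srq *srq, enum mlx4_event event_type)
{
	/* Called from EQ processing: atomic context, so keep it brief. */
	pr_info("SRQ 0x%06x: async event %d\n", srq->srqn, event_type);
}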

static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	/* A NULL mailbox means "no output wanted": DMA address 0, no-output flag set */
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
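
/*
 * Usage sketch (illustrative, not from this file): the mailbox
 * lifecycle every wrapper above relies on. "example_sw2hw" is a
 * hypothetical helper; the alloc/free calls are used exactly as in
 * mlx4_srq_alloc() and mlx4_srq_query() below.
 */
static int example_sw2hw(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
	int err;

	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* ... fill mailbox->buf with a struct mlx4_srq_context here ... */
	err = mlx4_SW2HW_SRQ(dev, mailbox, srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}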

/* __mlx4_srq_alloc_icm(): reserve an SRQ number and its ICM entries */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
	if (*srqn == -1)
		return -ENOMEM;
	err = mlx4_table_get(dev, &srq_table->table, *srqn);
	if (err)
		goto err_out;
	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
	if (err)
		goto err_put;
	return 0;
err_put:
	mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
	mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
	return err;
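
/*
 * The matched lines omit the mlx4_srq_alloc_icm() wrapper that sits
 * above __mlx4_srq_alloc_icm(). From memory of the driver (treat the
 * exact constants and helpers as an assumption to verify against your
 * tree), a multi-function device proxies the allocation through the
 * resource tracker rather than touching the bitmap directly, roughly:
 */
static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*srqn = get_param_l(&out_param);
		return err;
	}
	return __mlx4_srq_alloc_icm(dev, srqn);
}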

static void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;

	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
	mlx4_table_put(dev, &srq_table->table, srqn);
	mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}

/* mlx4_srq_alloc() */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride     = srq->wqe_shift - 4;
	srq_context->xrcd          = cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h = mtt_addr >> 32;
	srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd              = cpu_to_be32(pdn);
	srq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	refcount_set(&srq->refcount, 1);
	init_completion(&srq->free);
	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);
err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
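
/*
 * Caller sketch (hypothetical, not from this file): how a consumer such
 * as mlx4_ib might drive mlx4_srq_alloc(). All "my_" names are made up;
 * the parameter list mirrors how pdn, cqn, xrcd, mtt, db_rec and srq
 * are used above, though the exact exported signature should be checked
 * against include/linux/mlx4/device.h.
 */
static int my_create_srq(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
			 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	int err = mlx4_srq_alloc(dev, pdn, cqn, xrcd, mtt, db_rec, srq);

	if (err)
		return err;
	/* Hook up the async event handler sketched earlier. */
	srq->event = my_srq_event;
	return 0;
}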

/* mlx4_srq_free() */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n",
			  err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	/* drop the initial reference, then wait for event handlers to finish */
	if (refcount_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);

/* mlx4_srq_arm() */
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
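
/*
 * mlx4_ARM_SRQ() itself uses no mailbox, so it does not appear in the
 * matched lines. To the best of my recollection (treat the command
 * constants as an assumption), it is an immediate command of this shape:
 */
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}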

/* mlx4_srq_query() */
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	srq_context = mailbox->buf;

	err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
	if (err)
		goto err_out;
	*limit_watermark = be16_to_cpu(srq_context->limit_watermark);
err_out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
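
/*
 * Usage sketch (hypothetical): query the current limit watermark and
 * re-arm the SRQ limit event with it. "my_rearm_srq" is made up; the
 * two exported calls are used exactly as they appear in this file.
 */
static int my_rearm_srq(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	int watermark;
	int err = mlx4_srq_query(dev, srq, &watermark);

	if (err)
		return err;
	return mlx4_srq_arm(dev, srq, watermark);
}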

/* mlx4_init_srq_table() */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
	...
	return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
				dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);

/* mlx4_cleanup_srq_table() */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);

/* mlx4_srq_lookup() */
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;
	...
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
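
/*
 * Usage sketch (hypothetical, not from this file): resolving an SRQ
 * number, e.g. one reported by a completion or async event, back to
 * its driver structure via mlx4_srq_lookup().
 */
static void my_handle_srqn(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq *srq = mlx4_srq_lookup(dev, srqn);

	if (!srq)
		pr_warn("mlx4: no SRQ for srqn 0x%06x\n", srqn);
}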