/* BSD license header (source- and binary-redistribution clauses) elided */
/* mlx4_cq_tasklet_cb(): per-EQ tasklet that drains the list of CQs queued for
 * completion processing.
 */
	spin_lock_irqsave(&ctx->lock, flags);
	/* move the shared list onto a private one so the lock is not held
	 * while the completion handlers run
	 */
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		/* drop the reference taken in mlx4_add_cq_to_tasklet() */
		if (refcount_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
	}

	/* reschedule if CQs are still left on the process list */
	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
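/*
 * Hedged sketch of the drain pattern used above (generic and illustrative
 * only, not part of the mlx4 driver): the shared list is spliced onto a
 * private list under the lock, then walked with the lock released so that
 * producers are never blocked while callbacks run. All names here
 * (work_item, drain_ctx, drain) are made up for the example.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
	struct list_head node;
	void (*fn)(struct work_item *w);
};

struct drain_ctx {
	spinlock_t lock;
	struct list_head pending;	/* producers add here, under the lock */
	struct list_head processing;	/* consumer's private drain list */
};

static void drain(struct drain_ctx *ctx)
{
	struct work_item *w, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->pending, &ctx->processing);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(w, tmp, &ctx->processing, node) {
		list_del_init(&w->node);
		w->fn(w);	/* callback runs without the lock held */
	}
}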
/* mlx4_add_cq_to_tasklet(): completion callback installed by mlx4_cq_alloc();
 * queues the CQ on its EQ's tasklet list.
 */
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* queue the CQ only once; the reference is dropped in mlx4_cq_tasklet_cb() */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		refcount_inc(&cq->refcount);
		kick = list_empty(&tasklet_ctx->list);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
		if (kick)
			tasklet_schedule(&tasklet_ctx->task);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
/* mlx4_cq_completion(): look up the CQ by CQN and invoke its completion handler */
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	/* ... */
	++cq->arm_sn;
	cq->comp(cq);
/* mlx4_cq_event(): dispatch an async event to the CQ's event handler */
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	/* ... */
	cq->event(cq, event_type);
/* Thin wrappers around the FW command interface (bodies shown truncated):
 * SW2HW_CQ hands a CQ context to hardware, MODIFY_CQ updates it,
 * HW2SW_CQ takes it back.
 */
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, ...
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, ...

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, ...
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ, ...

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, ...
	/* with a NULL mailbox the CQ context is not read back */
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ, ...
/* mlx4_cq_modify(): update a CQ's interrupt moderation (event count / period) */
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period = cpu_to_be16(period);

	/* opmod 1 selects the moderation-only update */
	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
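/*
 * Hedged usage sketch (not part of this file): a consumer tuning interrupt
 * moderation through the exported mlx4_cq_modify(). The function name
 * example_set_moderation and the chosen values are assumptions made up for
 * illustration; only mlx4_cq_modify() itself comes from the driver API.
 */
#include <linux/mlx4/device.h>
#include <linux/mlx4/cq.h>

static int example_set_moderation(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	/* coalesce up to 64 completions per event, with a moderation period
	 * of 16 (units as defined by the device)
	 */
	return mlx4_cq_modify(dev, cq, 64, 16);
}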
/* mlx4_cq_resize(): point an existing CQ at a new, resized CQE buffer */
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - 12;	/* 12 == MLX4_ICM_PAGE_SHIFT */
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	/* opmod 0 selects the resize operation */
	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
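/*
 * Worked example of the logsize_usrpage packing above (illustrative note):
 * the CQE count is expected to be a power of two, and its base-2 log goes in
 * the top byte of the 32-bit field. For entries == 1024, ilog2(1024) == 10,
 * so the field holds 10 << 24 == 0x0a000000 before cpu_to_be32() swaps it to
 * the big-endian layout the firmware reads.
 */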
/* __mlx4_cq_alloc_icm(): reserve a CQN and pin the ICM entries backing it */
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn);
	/* ... */
	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
	/* ... error unwind: */
	mlx4_table_put(dev, &cq_table->table, *cqn);

	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
/* __mlx4_cq_free_icm(): undo __mlx4_cq_alloc_icm() in reverse order */
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
/* mlx4_init_user_cqes(): only its error returns matched here. It returns
 * -ENOMEM if its scratch buffer cannot be allocated and -EFAULT (via a
 * "? -EFAULT : 0" expression) when copying the CQE initialization pattern
 * to the user buffer fails.
 */
/* mlx4_init_kernel_cqes(): fill kernel-owned CQE buffers with 0xcc, the
 * initialization value the firmware expects.
 */
	if (buf->nbufs == 1)
		memset(buf->direct.buf, 0xcc, entries * cqe_size);
	else
		for (i = 0; i < buf->npages; i++)
			memset(buf->page_list[i].buf, 0xcc,
			       1UL << buf->page_shift);
/* mlx4_cq_alloc(): reserve a CQN, build the CQ context in a command mailbox,
 * hand it to firmware with SW2HW_CQ, and wire the CQ into the completion path.
 */
	bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
	/* ... */

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	/* ... */

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		/* ... */
	}

	cq_context = mailbox->buf;
	/* bit 18: collapsed CQ; bit 19: completion timestamping */
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	/* When the firmware supports software CQ initialization (sw_cq_init),
	 * the CQE buffer is pre-initialized here via mlx4_init_user_cqes() or
	 * mlx4_init_kernel_cqes(); only the dev->caps.cqe_size argument of
	 * those calls matched in this listing.
	 */

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
	mlx4_free_cmd_mailbox(dev, mailbox);
	/* ... */

	cq->cons_index = 0;
	cq->arm_sn = 1;
	cq->uar = uar;
	refcount_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

	/* error unwind (labels elided): remove the CQ from the radix tree and
	 * release its ICM resources
	 */
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	mlx4_cq_free_icm(dev, cq->cqn);
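/*
 * Worked example of the flags packing above (illustrative note): with both
 * collapsed and timestamp_en set, the context flags word holds
 * (1 << 18) | (1 << 19) == 0x000c0000 before cpu_to_be32() converts it to
 * the big-endian layout the firmware reads.
 */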
/* mlx4_cq_free(): return the CQ to software, quiesce its interrupts, wait for
 * any in-flight completion work, then release the CQN and ICM.
 */
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	/* make sure no EQ handler is still running against this CQ */
	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	/* drop the initial reference and wait for outstanding users to finish */
	if (refcount_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
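/*
 * Hedged sketch of the lifetime pattern shared by mlx4_cq_alloc(),
 * mlx4_add_cq_to_tasklet(), mlx4_cq_tasklet_cb() and mlx4_cq_free() above
 * (generic and illustrative only): the object starts with one "owner"
 * reference, each in-flight user holds another, and the destroyer drops the
 * owner reference and then waits on a completion signalled when the last
 * reference goes away. The obj/obj_* names are made up for the example.
 */
#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
	refcount_t refcount;
	struct completion free;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcount, 1);	/* the owner reference */
	init_completion(&o->free);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		complete(&o->free);
}

static void obj_destroy(struct obj *o)
{
	obj_put(o);			/* drop the owner reference */
	wait_for_completion(&o->free);	/* wait for in-flight users */
}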
/* mlx4_init_cq_table(): set up the CQN radix tree and allocation bitmap */
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	/* ... */
	/* bitmap of num_cqs CQNs, mask num_cqs - 1, with reserved_cqs kept back */
	return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
				dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);

/* mlx4_cleanup_cq_table(): release the CQN allocation bitmap */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);