xref: /linux/drivers/net/ethernet/mellanox/mlx4/cq.c (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"

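/* Field encodings within the (big-endian) CQ context words: completion
 * status lives in bits 31:28, the collapsed (CC) and overrun-ignore (OI)
 * flags in bits 18:17, and the CQ/EQ state in bits 11:8.
 */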
#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

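/* Tasklet half of CQ completion handling: drain the per-EQ list of CQs
 * queued from hard-IRQ context, invoking each CQ's real completion
 * handler and dropping the reference taken when it was queued.  To keep
 * latency bounded, give up after TASKLET_MAX_TIME_JIFFIES and reschedule
 * the tasklet to finish whatever is left.
 */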
void mlx4_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}

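/* Hard-IRQ half: installed as cq->comp by mlx4_cq_alloc(), this queues
 * the CQ on its EQ's tasklet list (taking a reference so the CQ cannot
 * be freed while queued); mlx4_cq_tasklet_cb() runs the real handler,
 * tasklet_ctx.comp, later in softirq context.
 */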
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	unsigned long flags;
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When migration of CQs between EQs is implemented, this point
	 * will need extra synchronization: while a CQ is being migrated,
	 * completions may still arrive on the old EQ.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

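/* Called from the EQ interrupt handler for each completion event: look
 * up the CQ (with the CQN masked down to the table size), bump the
 * arming serial number so stale arm doorbells can be told apart from
 * current ones, and invoke the CQ's completion callback.
 */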
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);
}

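/* Called from the EQ handler for asynchronous CQ events (e.g. CQ error).
 * A reference is taken under the table lock so the CQ cannot be freed
 * while its event callback is running.
 */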
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	spin_lock(&cq_table->lock);

	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&cq_table->lock);

	if (!cq) {
		mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

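/* Firmware command wrappers: SW2HW_CQ hands a software-built CQ context
 * to the hardware, MODIFY_CQ changes it in place (op_mod 0 resizes, 1
 * updates the moderation parameters, as used below), and HW2SW_CQ
 * returns ownership to software.  Passing a NULL mailbox to
 * mlx4_HW2SW_CQ makes it a pure teardown: the op modifier is set so
 * that no CQ context is copied back.
 */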
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

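/* Adjust a CQ's event moderation: request a completion event only after
 * @count completions have accumulated, or after a delay controlled by
 * @period, whichever happens first.
 */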
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period    = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);

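/* Resize a CQ in place: hand the firmware the new log2 size and the MTT
 * of the replacement buffer via MODIFY_CQ with op_mod 0.  Migrating any
 * unpolled CQEs out of the old buffer is the caller's business.
 */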
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);

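/* Low-level CQN allocation: reserve a CQ number from the bitmap, then
 * make sure the ICM pages backing its CQ context entry and its cMPT
 * entry are mapped.  Undone by __mlx4_cq_free_icm().
 */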
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}

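/* Multi-function aware wrapper: in mfunc mode the CQN comes from an
 * ALLOC_RES command (wrapped, so resource accounting is done by the
 * PF); in native mode we allocate directly.
 */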
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}

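/* Release a CQN: drop the ICM references taken at allocation and return
 * the number to the bitmap.  mlx4_cq_free_icm() below is the mfunc-aware
 * counterpart of mlx4_cq_alloc_icm(), going through FREE_RES instead.
 */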
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}

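/**
 * mlx4_cq_alloc - create a completion queue and hand it to the hardware
 * @dev:          mlx4 device
 * @nent:         number of CQ entries (a power of two, given the ilog2()
 *                encoding below)
 * @mtt:          MTT describing the already-initialized CQ buffer
 * @uar:          UAR whose doorbell page will be used to arm the CQ
 * @db_rec:       DMA address of the doorbell record
 * @cq:           CQ structure to initialize
 * @vector:       completion event vector (EQ) to bind the CQ to
 * @collapsed:    if non-zero, all CQEs overwrite the first entry
 * @timestamp_en: if non-zero, CQEs carry hardware timestamps
 *
 * Allocates a CQN with its ICM backing, makes the CQ reachable from the
 * EQ handlers through the radix tree, and fires SW2HW_CQ with the
 * assembled CQ context.  On success the CQ holds one reference, dropped
 * by mlx4_cq_free().
 */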
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags  |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[cq->vector].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[cq->vector].irq;
	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);

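/* Tear down a CQ: return ownership to software with HW2SW_CQ, make sure
 * no interrupt handler can still see the CQ (synchronize_irq() plus
 * removal from the radix tree), then drop the initial reference and wait
 * for any outstanding event/tasklet references before releasing the ICM
 * backing.
 */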
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	synchronize_irq(priv->eq_table.eq[cq->vector].irq);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);

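/* The spinlock and radix tree are needed on every function, but the CQN
 * bitmap exists only on the PF/native side: slaves obtain CQNs from the
 * PF through ALLOC_RES.
 */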
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}

void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}