/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
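
/*
 * Completion queue (CQ) support for the mlx5 core driver: a per-device
 * lookup table mapping CQ numbers to mlx5_core_cq objects, the firmware
 * commands to create, destroy, query and modify CQs, and the dispatch
 * of completion and asynchronous events to the registered CQ handlers.
 */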

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

#include <sys/epoch.h>

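/*
 * Table updates are serialized by "writerlock" and advertised through
 * "writercount".  The completion and event dispatchers below run inside
 * the network epoch and normally look a CQ up without taking any lock;
 * they only fall back to "writerlock" while a writer is active.  The
 * NET_EPOCH_WAIT() issued after raising "writercount" lets every
 * lock-free lookup already in flight finish before the table is
 * changed, and the one issued after dropping the count drains lookups
 * that might still reference an entry the writer just removed.
 */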
static void
mlx5_cq_table_write_lock(struct mlx5_cq_table *table)
{

	atomic_inc(&table->writercount);
	/* make sure all see the updated writercount */
	NET_EPOCH_WAIT();
	spin_lock(&table->writerlock);
}

static void
mlx5_cq_table_write_unlock(struct mlx5_cq_table *table)
{

	spin_unlock(&table->writerlock);
	atomic_dec(&table->writercount);
	/* drain all pending CQ callers */
	NET_EPOCH_WAIT();
}

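/*
 * Dispatch a hardware completion event: look up the CQ named in the
 * EQE, bump its arming sequence number and call its completion handler.
 * A warning is logged if the CQ is no longer in the table.
 */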
void mlx5_cq_completion(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	u32 cqn;
	bool do_lock;

	cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		++cq->arm_sn;
		cq->comp(cq, eqe);
	} else {
		mlx5_core_warn(dev,
		    "Completion event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

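/*
 * Dispatch an asynchronous event (for example a CQ error) to the event
 * handler registered on the CQ, if the CQ is still in the table.
 */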
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;
	struct epoch_tracker et;
	bool do_lock;

	NET_EPOCH_ENTER(et);

	do_lock = atomic_read(&table->writercount) != 0;
	if (unlikely(do_lock))
		spin_lock(&table->writerlock);

	if (likely(cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		cq = table->linear_array[cqn].cq;
	else
		cq = radix_tree_lookup(&table->tree, cqn);

	if (unlikely(do_lock))
		spin_unlock(&table->writerlock);

	if (likely(cq != NULL)) {
		cq->event(cq, event_type);
	} else {
		mlx5_core_warn(dev,
		    "Asynchronous event for bogus CQ 0x%x\n", cqn);
	}

	NET_EPOCH_EXIT(et);
}

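/*
 * Create a CQ through the CREATE_CQ firmware command and insert it into
 * the radix tree (and, for low CQ numbers, the linear lookup array).
 * If the insertion fails, the freshly created firmware object is
 * destroyed again before the error is returned.
 */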
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	int err;

	memset(out, 0, outlen);
	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err)
		return err;

	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
	cq->cons_index = 0;
	cq->arm_sn = 0;

	mlx5_cq_table_write_lock(table);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	if (likely(err == 0 && cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = cq;
	mlx5_cq_table_write_unlock(table);

	if (err)
		goto err_cmd;

	cq->pid = curthread->td_proc->p_pid;
	cq->uar = dev->priv.uar;

	return 0;

err_cmd:
	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

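/*
 * Remove the CQ from the lookup structures, let the write-side epoch
 * drain make sure no completion or event handler still references it,
 * and then destroy the firmware object.
 */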
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	struct mlx5_core_cq *tmp;

	mlx5_cq_table_write_lock(table);
	if (likely(cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE))
		table->linear_array[cq->cqn].cq = NULL;
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	mlx5_cq_table_write_unlock(table);

	if (unlikely(tmp == NULL)) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	} else if (unlikely(tmp != cq)) {
		mlx5_core_warn(dev, "corrupted cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

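/*
 * Query the CQ context from firmware into the caller-provided output
 * buffer.
 */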
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
	MLX5_SET(query_cq_in, in, cqn, cq->cqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);


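/*
 * Execute a caller-built MODIFY_CQ command; only the opcode is filled
 * in here.
 */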
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

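/*
 * Convenience wrapper that updates only the interrupt moderation
 * parameters of a CQ.  A hypothetical caller might, for instance, do
 *
 *	error = mlx5_core_modify_cq_moderation(dev, &cq, cq_period,
 *	    cq_max_count);
 *
 * where cq_period and cq_max_count are the desired moderation timer
 * and completion-count thresholds (the names are illustrative).
 */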
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	return (mlx5_core_modify_cq_by_mask(dev, cq,
	    MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT,
	    cq_period, cq_max_count, 0, 0));
}

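/*
 * Like mlx5_core_modify_cq_moderation(), but additionally selects the
 * CQ period mode.
 */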
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode)
{
	return (mlx5_core_modify_cq_by_mask(dev, cq,
	    MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE,
	    cq_period, cq_max_count, cq_mode, 0));
}

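/*
 * Build and execute a MODIFY_CQ command that changes only the fields
 * selected by "mask": the moderation period, the moderation count, the
 * period mode and/or the EQ the CQ reports to.
 */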
int
mlx5_core_modify_cq_by_mask(struct mlx5_core_dev *dev,
    struct mlx5_core_cq *cq, u32 mask,
    u16 cq_period, u16 cq_max_count, u8 cq_mode, u8 cq_eqn)
{
	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
	void *cqc;

	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
	if (mask & MLX5_CQ_MODIFY_PERIOD)
		MLX5_SET(cqc, cqc, cq_period, cq_period);
	if (mask & MLX5_CQ_MODIFY_COUNT)
		MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
	if (mask & MLX5_CQ_MODIFY_PERIOD_MODE)
		MLX5_SET(cqc, cqc, cq_period_mode, cq_mode);
	if (mask & MLX5_CQ_MODIFY_EQN)
		MLX5_SET(cqc, cqc, c_eqn, cq_eqn);

	MLX5_SET(modify_cq_in, in,
	    modify_field_select_resize_field_select.modify_field_select.modify_field_select, mask);

	return (mlx5_core_modify_cq(dev, cq, in, sizeof(in)));
}

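/*
 * Initialize the per-device CQ table: the writer spinlock, the radix
 * tree and, via the memset, the linear lookup array.
 */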
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->writerlock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

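/*
 * Currently a no-op, kept as the counterpart of mlx5_init_cq_table().
 */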
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}