1 /*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 */
25
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28
29 #include <linux/kernel.h>
30 #include <linux/module.h>
31 #include <dev/mlx5/driver.h>
32 #include <dev/mlx5/srq.h>
33 #include <rdma/ib_verbs.h>
34 #include <dev/mlx5/mlx5_core/mlx5_core.h>
35 #include <dev/mlx5/mlx5_core/transobj.h>
36
/*
 * Dispatch an async firmware event to the SRQ identified by srqn.
 *
 * The SRQ is looked up under the table lock and pinned with a reference
 * so it cannot be freed while its event handler runs; dropping the
 * reference afterwards completes srq->free for a concurrent destroy.
 */
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);
	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq != NULL)
		atomic_inc(&srq->refcount);
	spin_unlock(&table->lock);

	if (srq == NULL) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	/* Drop the pin; wakes a waiter in mlx5_core_destroy_srq(). */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
60
/*
 * Translate generic SRQ attributes into a firmware "wq" context
 * (used by the RMP-backed SRQ path when dev->issi != 0).
 */
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq, wq, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
	/* stride is stored with a +4 bias; get_wq() subtracts it back */
	MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq, wq, log_wq_sz, in->log_size);
	MLX5_SET(wq, wq, page_offset, in->page_offset);
	MLX5_SET(wq, wq, lwm, in->lwm);
	MLX5_SET(wq, wq, pd, in->pd);
	MLX5_SET64(wq, wq, dbr_addr, in->db_record);
}
72
/*
 * Translate generic SRQ attributes into a firmware "srqc" context
 * (used by the native SRQ and XRC SRQ create paths).
 */
static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc, srqc, wq_signature, !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
	/* unlike set_wq(), the srqc stride field carries wqe_shift directly */
	MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
	MLX5_SET(srqc, srqc, page_offset, in->page_offset);
	MLX5_SET(srqc, srqc, lwm, in->lwm);
	MLX5_SET(srqc, srqc, pd, in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
	MLX5_SET(srqc, srqc, xrcd, in->xrcd);
	MLX5_SET(srqc, srqc, cqn, in->cqn);
}
86
get_wq(void * wq,struct mlx5_srq_attr * in)87 static void get_wq(void *wq, struct mlx5_srq_attr *in)
88 {
89 if (MLX5_GET(wq, wq, wq_signature))
90 in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
91 in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
92 in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
93 in->log_size = MLX5_GET(wq, wq, log_wq_sz);
94 in->page_offset = MLX5_GET(wq, wq, page_offset);
95 in->lwm = MLX5_GET(wq, wq, lwm);
96 in->pd = MLX5_GET(wq, wq, pd);
97 in->db_record = MLX5_GET64(wq, wq, dbr_addr);
98 }
99
get_srqc(void * srqc,struct mlx5_srq_attr * in)100 static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
101 {
102 if (MLX5_GET(srqc, srqc, wq_signature))
103 in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
104 in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
105 in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
106 in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
107 in->page_offset = MLX5_GET(srqc, srqc, page_offset);
108 in->lwm = MLX5_GET(srqc, srqc, lwm);
109 in->pd = MLX5_GET(srqc, srqc, pd);
110 in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
111 }
112
/*
 * Look up an SRQ by number and return it with an extra reference held,
 * or NULL if no such SRQ is registered.  The caller owns the reference
 * and must eventually drop it (completing srq->free on last put).
 */
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *found;

	spin_lock(&table->lock);
	found = radix_tree_lookup(&table->tree, srqn);
	if (found != NULL)
		atomic_inc(&found->refcount);
	spin_unlock(&table->lock);

	return (found);
}
EXPORT_SYMBOL(mlx5_core_get_srq);
129
get_pas_size(struct mlx5_srq_attr * in)130 static int get_pas_size(struct mlx5_srq_attr *in)
131 {
132 u32 log_page_size = in->log_page_size + 12;
133 u32 log_srq_size = in->log_size;
134 u32 log_rq_stride = in->wqe_shift;
135 u32 page_offset = in->page_offset;
136 u32 po_quanta = 1 << (log_page_size - 6);
137 u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
138 u32 page_size = 1 << log_page_size;
139 u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
140 u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
141
142 return rq_num_pas * sizeof(u64);
143
144 }
145
create_rmp_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * in)146 static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
147 struct mlx5_srq_attr *in)
148 {
149 void *create_in;
150 void *rmpc;
151 void *wq;
152 int pas_size;
153 int inlen;
154 int err;
155
156 pas_size = get_pas_size(in);
157 inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
158 create_in = mlx5_vzalloc(inlen);
159 if (!create_in)
160 return -ENOMEM;
161
162 rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
163 wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
164
165 MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
166 set_wq(wq, in);
167 memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
168
169 err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
170
171 kvfree(create_in);
172 return err;
173 }
174
/* Tear down the RMP firmware object backing this SRQ. */
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}
180
query_rmp_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * out)181 static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
182 struct mlx5_srq_attr *out)
183 {
184 u32 *rmp_out;
185 void *rmpc;
186 int err;
187
188 rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
189 if (!rmp_out)
190 return -ENOMEM;
191
192 err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
193 if (err)
194 goto out;
195
196 rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
197 get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
198 if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
199 out->flags |= MLX5_SRQ_FLAG_ERR;
200
201 out:
202 kvfree(rmp_out);
203 return 0;
204 }
205
/* Re-arm the limit-water-mark event on an RMP-backed SRQ. */
static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}
210
create_xrc_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * in)211 static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
212 struct mlx5_core_srq *srq,
213 struct mlx5_srq_attr *in)
214 {
215 void *create_in;
216 void *xrc_srqc;
217 void *pas;
218 int pas_size;
219 int inlen;
220 int err;
221
222 pas_size = get_pas_size(in);
223 inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
224 create_in = mlx5_vzalloc(inlen);
225 if (!create_in)
226 return -ENOMEM;
227
228 xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
229 pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
230
231 set_srqc(xrc_srqc, in);
232 MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
233 memcpy(pas, in->pas, pas_size);
234
235 err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
236 if (err)
237 goto out;
238
239 out:
240 kvfree(create_in);
241 return err;
242 }
243
/* Tear down the XRC SRQ firmware object backing this SRQ. */
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_xsrq(dev, srq->srqn);
}
249
query_xrc_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * out)250 static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
251 struct mlx5_core_srq *srq,
252 struct mlx5_srq_attr *out)
253 {
254 u32 *xrcsrq_out;
255 void *xrc_srqc;
256 int err;
257
258 xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
259 if (!xrcsrq_out)
260 return -ENOMEM;
261
262 err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
263 if (err)
264 goto out;
265
266 xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
267 xrc_srq_context_entry);
268 get_srqc(xrc_srqc, out);
269 if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
270 out->flags |= MLX5_SRQ_FLAG_ERR;
271
272 out:
273 kvfree(xrcsrq_out);
274 return err;
275 }
276
/* Re-arm the limit-water-mark event on an XRC SRQ. */
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}
282
create_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * in)283 static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
284 struct mlx5_srq_attr *in)
285 {
286 u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
287 void *create_in;
288 void *srqc;
289 void *pas;
290 int pas_size;
291 int inlen;
292 int err;
293
294 pas_size = get_pas_size(in);
295 inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
296 create_in = mlx5_vzalloc(inlen);
297 if (!create_in)
298 return -ENOMEM;
299
300 srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
301 pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
302
303 set_srqc(srqc, in);
304 memcpy(pas, in->pas, pas_size);
305
306 MLX5_SET(create_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_SRQ);
307 err = mlx5_cmd_exec(dev, create_in, inlen, create_out, sizeof(create_out));
308 kvfree(create_in);
309 if (!err)
310 srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
311
312 return err;
313 }
314
destroy_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)315 static int destroy_srq_cmd(struct mlx5_core_dev *dev,
316 struct mlx5_core_srq *srq)
317 {
318 u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
319 u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
320
321 MLX5_SET(destroy_srq_in, srq_in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
322 MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
323
324 return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, sizeof(srq_out));
325 }
326
query_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * out)327 static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
328 struct mlx5_srq_attr *out)
329 {
330 u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
331 u32 *srq_out;
332 void *srqc;
333 int outlen = MLX5_ST_SZ_BYTES(query_srq_out);
334 int err;
335
336 srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
337 if (!srq_out)
338 return -ENOMEM;
339
340 MLX5_SET(query_srq_in, srq_in, opcode, MLX5_CMD_OP_QUERY_SRQ);
341 MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
342 err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), srq_out, outlen);
343 if (err)
344 goto out;
345
346 srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
347 get_srqc(srqc, out);
348 if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
349 out->flags |= MLX5_SRQ_FLAG_ERR;
350 out:
351 kvfree(srq_out);
352 return err;
353 }
354
/*
 * Re-arm the limit-water-mark event on a native SRQ.  The is_srq
 * argument is accepted for interface symmetry but is not consulted
 * here: there is no dedicated arm_srq layout, so the identical
 * arm_xrc_srq structures are reused for both cases.
 */
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 din[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 dout[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, din, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, din, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, din, lwm, lwm);

	return (mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout)));
}
368
create_srq_split(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * in)369 static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
370 struct mlx5_srq_attr *in)
371 {
372 if (!dev->issi)
373 return create_srq_cmd(dev, srq, in);
374 else if (srq->common.res == MLX5_RES_XSRQ)
375 return create_xrc_srq_cmd(dev, srq, in);
376 else
377 return create_rmp_cmd(dev, srq, in);
378 }
379
destroy_srq_split(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)380 static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
381 {
382 if (!dev->issi)
383 return destroy_srq_cmd(dev, srq);
384 else if (srq->common.res == MLX5_RES_XSRQ)
385 return destroy_xrc_srq_cmd(dev, srq);
386 else
387 return destroy_rmp_cmd(dev, srq);
388 }
389
mlx5_core_create_srq(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * in)390 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
391 struct mlx5_srq_attr *in)
392 {
393 int err;
394 struct mlx5_srq_table *table = &dev->priv.srq_table;
395
396 if (in->type == IB_SRQT_XRC)
397 srq->common.res = MLX5_RES_XSRQ;
398 else
399 srq->common.res = MLX5_RES_SRQ;
400
401 err = create_srq_split(dev, srq, in);
402 if (err)
403 return err;
404
405 atomic_set(&srq->refcount, 1);
406 init_completion(&srq->free);
407
408 spin_lock_irq(&table->lock);
409 err = radix_tree_insert(&table->tree, srq->srqn, srq);
410 spin_unlock_irq(&table->lock);
411 if (err) {
412 mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
413 goto err_destroy_srq_split;
414 }
415
416 return 0;
417
418 err_destroy_srq_split:
419 destroy_srq_split(dev, srq);
420
421 return err;
422 }
423 EXPORT_SYMBOL(mlx5_core_create_srq);
424
mlx5_core_destroy_srq(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)425 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
426 {
427 struct mlx5_srq_table *table = &dev->priv.srq_table;
428 struct mlx5_core_srq *tmp;
429 int err;
430
431 spin_lock_irq(&table->lock);
432 tmp = radix_tree_delete(&table->tree, srq->srqn);
433 spin_unlock_irq(&table->lock);
434 if (!tmp) {
435 mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
436 return -EINVAL;
437 }
438 if (tmp != srq) {
439 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
440 return -EINVAL;
441 }
442
443 err = destroy_srq_split(dev, srq);
444 if (err)
445 return err;
446
447 if (atomic_dec_and_test(&srq->refcount))
448 complete(&srq->free);
449 wait_for_completion(&srq->free);
450
451 return 0;
452 }
453 EXPORT_SYMBOL(mlx5_core_destroy_srq);
454
mlx5_core_query_srq(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_srq_attr * out)455 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
456 struct mlx5_srq_attr *out)
457 {
458 if (!dev->issi)
459 return query_srq_cmd(dev, srq, out);
460 else if (srq->common.res == MLX5_RES_XSRQ)
461 return query_xrc_srq_cmd(dev, srq, out);
462 else
463 return query_rmp_cmd(dev, srq, out);
464 }
465 EXPORT_SYMBOL(mlx5_core_query_srq);
466
/* Arm an SRQ's limit event via the command set matching ISSI and type. */
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (dev->issi == 0)
		return (arm_srq_cmd(dev, srq, lwm, is_srq));
	if (srq->common.res == MLX5_RES_XSRQ)
		return (arm_xrc_srq_cmd(dev, srq, lwm));
	return (arm_rmp_cmd(dev, srq, lwm));
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
478
mlx5_init_srq_table(struct mlx5_core_dev * dev)479 void mlx5_init_srq_table(struct mlx5_core_dev *dev)
480 {
481 struct mlx5_srq_table *table = &dev->priv.srq_table;
482
483 memset(table, 0, sizeof(*table));
484 spin_lock_init(&table->lock);
485 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
486 }
487
/*
 * Counterpart of mlx5_init_srq_table().  Intentionally empty: by the
 * time this runs, every SRQ is expected to have been destroyed, so
 * there is nothing left to release.
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
492