xref: /linux/drivers/infiniband/hw/mlx5/qp.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/etherdevice.h>
34 #include <rdma/ib_umem.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_user_verbs.h>
37 #include <rdma/rdma_counter.h>
38 #include <linux/mlx5/fs.h>
39 #include "mlx5_ib.h"
40 #include "ib_rep.h"
41 #include "counters.h"
42 #include "cmd.h"
43 #include "umr.h"
44 #include "qp.h"
45 #include "wr.h"
46 
47 enum {
48 	MLX5_IB_ACK_REQ_FREQ	= 8,
49 };
50 
51 enum {
52 	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
53 	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
54 	MLX5_IB_LINK_TYPE_IB		= 0,
55 	MLX5_IB_LINK_TYPE_ETH		= 1
56 };
57 
58 enum raw_qp_set_mask_map {
59 	MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID		= 1UL << 0,
60 	MLX5_RAW_QP_RATE_LIMIT			= 1UL << 1,
61 };
62 
63 enum {
64 	MLX5_QP_RM_GO_BACK_N			= 0x1,
65 };
66 
67 struct mlx5_modify_raw_qp_param {
68 	u16 operation;
69 
70 	u32 set_mask; /* raw_qp_set_mask_map */
71 
72 	struct mlx5_rate_limit rl;
73 
74 	u8 rq_q_ctr_id;
75 	u32 port;
76 };
77 
78 struct mlx5_ib_qp_event_work {
79 	struct work_struct work;
80 	struct mlx5_core_qp *qp;
81 	int type;
82 };
83 
84 static struct workqueue_struct *mlx5_ib_qp_event_wq;
85 
86 static void get_cqs(enum ib_qp_type qp_type,
87 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
88 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
89 
90 static int is_qp0(enum ib_qp_type qp_type)
91 {
92 	return qp_type == IB_QPT_SMI;
93 }
94 
95 static int is_sqp(enum ib_qp_type qp_type)
96 {
97 	return is_qp0(qp_type) || is_qp1(qp_type);
98 }
99 
100 /**
101  * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of one) from a user WQ
102  * to a kernel buffer
103  *
104  * @umem: User space memory where the WQ is
105  * @buffer: buffer to copy to
106  * @buflen: buffer length
107  * @wqe_index: index of WQE to copy from
108  * @wq_offset: offset to start of WQ
109  * @wq_wqe_cnt: number of WQEs in WQ
110  * @wq_wqe_shift: log2 of WQE size
111  * @bcnt: number of bytes to copy
112  * @bytes_copied: number of bytes actually copied (return value)
113  *
114  * Copies at most bcnt bytes from the start of the WQE.
115  * Does not guarantee to copy the entire WQE.
116  *
117  * Return: zero on success, or an error code.
118  */
119 static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
120 					size_t buflen, int wqe_index,
121 					int wq_offset, int wq_wqe_cnt,
122 					int wq_wqe_shift, int bcnt,
123 					size_t *bytes_copied)
124 {
125 	size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
126 	size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
127 	size_t copy_length;
128 	int ret;
129 
130 	/* don't copy more than requested, more than the buffer length, or
131 	 * beyond the WQ end
132 	 */
133 	copy_length = min_t(u32, buflen, wq_end - offset);
134 	copy_length = min_t(u32, copy_length, bcnt);
135 
136 	ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
137 	if (ret)
138 		return ret;
139 
140 	if (bytes_copied)
141 		*bytes_copied = copy_length;
142 
143 	return 0;
144 }
145 
146 static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
147 				      void *buffer, size_t buflen, size_t *bc)
148 {
149 	struct mlx5_wqe_ctrl_seg *ctrl;
150 	size_t bytes_copied = 0;
151 	size_t wqe_length;
152 	void *p;
153 	int ds;
154 
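	/* sz_m1 is the SQ size minus one, so masking wraps the index into the ring */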
155 	wqe_index = wqe_index & qp->sq.fbc.sz_m1;
156 
157 	/* read the control segment first */
158 	p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
159 	ctrl = p;
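	/* the DS field counts 16-byte units (MLX5_WQE_DS_UNITS), covering the whole WQE */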
160 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
161 	wqe_length = ds * MLX5_WQE_DS_UNITS;
162 
163 	/* read the rest of the WQE if it spans more than one stride */
164 	while (bytes_copied < wqe_length) {
165 		size_t copy_length =
166 			min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
167 
168 		if (!copy_length)
169 			break;
170 
171 		memcpy(buffer + bytes_copied, p, copy_length);
172 		bytes_copied += copy_length;
173 
174 		wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
175 		p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
176 	}
177 	*bc = bytes_copied;
178 	return 0;
179 }
180 
181 static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
182 				    void *buffer, size_t buflen, size_t *bc)
183 {
184 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
185 	struct ib_umem *umem = base->ubuffer.umem;
186 	struct mlx5_ib_wq *wq = &qp->sq;
187 	struct mlx5_wqe_ctrl_seg *ctrl;
188 	size_t bytes_copied;
189 	size_t bytes_copied2;
190 	size_t wqe_length;
191 	int ret;
192 	int ds;
193 
194 	/* first, read as much as possible */
195 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
196 					   wq->offset, wq->wqe_cnt,
197 					   wq->wqe_shift, buflen,
198 					   &bytes_copied);
199 	if (ret)
200 		return ret;
201 
202 	/* we need at least the control segment size to proceed */
203 	if (bytes_copied < sizeof(*ctrl))
204 		return -EINVAL;
205 
206 	ctrl = buffer;
207 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
208 	wqe_length = ds * MLX5_WQE_DS_UNITS;
209 
210 	/* if we copied enough then we are done */
211 	if (bytes_copied >= wqe_length) {
212 		*bc = bytes_copied;
213 		return 0;
214 	}
215 
216 	/* otherwise this is a wrapped-around WQE,
217 	 * so read the remaining bytes starting
218 	 * from wqe_index 0
219 	 */
220 	ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
221 					   buflen - bytes_copied, 0, wq->offset,
222 					   wq->wqe_cnt, wq->wqe_shift,
223 					   wqe_length - bytes_copied,
224 					   &bytes_copied2);
225 
226 	if (ret)
227 		return ret;
228 	*bc = bytes_copied + bytes_copied2;
229 	return 0;
230 }
231 
232 int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
233 			size_t buflen, size_t *bc)
234 {
235 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
236 	struct ib_umem *umem = base->ubuffer.umem;
237 
238 	if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
239 		return -EINVAL;
240 
241 	if (!umem)
242 		return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
243 						  buflen, bc);
244 
245 	return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
246 }
247 
248 static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
249 				    void *buffer, size_t buflen, size_t *bc)
250 {
251 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
252 	struct ib_umem *umem = base->ubuffer.umem;
253 	struct mlx5_ib_wq *wq = &qp->rq;
254 	size_t bytes_copied;
255 	int ret;
256 
257 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
258 					   wq->offset, wq->wqe_cnt,
259 					   wq->wqe_shift, buflen,
260 					   &bytes_copied);
261 
262 	if (ret)
263 		return ret;
264 	*bc = bytes_copied;
265 	return 0;
266 }
267 
268 int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
269 			size_t buflen, size_t *bc)
270 {
271 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
272 	struct ib_umem *umem = base->ubuffer.umem;
273 	struct mlx5_ib_wq *wq = &qp->rq;
274 	size_t wqe_size = 1 << wq->wqe_shift;
275 
276 	if (buflen < wqe_size)
277 		return -EINVAL;
278 
279 	if (!umem)
280 		return -EOPNOTSUPP;
281 
282 	return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
283 }
284 
285 static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
286 				     void *buffer, size_t buflen, size_t *bc)
287 {
288 	struct ib_umem *umem = srq->umem;
289 	size_t bytes_copied;
290 	int ret;
291 
292 	ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
293 					   srq->msrq.max, srq->msrq.wqe_shift,
294 					   buflen, &bytes_copied);
295 
296 	if (ret)
297 		return ret;
298 	*bc = bytes_copied;
299 	return 0;
300 }
301 
302 int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
303 			 size_t buflen, size_t *bc)
304 {
305 	struct ib_umem *umem = srq->umem;
306 	size_t wqe_size = 1 << srq->msrq.wqe_shift;
307 
308 	if (buflen < wqe_size)
309 		return -EINVAL;
310 
311 	if (!umem)
312 		return -EOPNOTSUPP;
313 
314 	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
315 }
316 
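/* Query the QPC extension of a QP that hit a fatal or access error and log its
 * HW error syndrome, when the device exposes it.
 */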
317 static void mlx5_ib_qp_err_syndrome(struct ib_qp *ibqp)
318 {
319 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
320 	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
321 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
322 	void *pas_ext_union, *err_syn;
323 	u32 *outb;
324 	int err;
325 
326 	if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) ||
327 	    !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome))
328 		return;
329 
330 	outb = kzalloc(outlen, GFP_KERNEL);
331 	if (!outb)
332 		return;
333 
334 	err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
335 				 true);
336 	if (err)
337 		goto out;
338 
339 	pas_ext_union =
340 		MLX5_ADDR_OF(query_qp_out, outb, qp_pas_or_qpc_ext_and_pas);
341 	err_syn = MLX5_ADDR_OF(qpc_extension_and_pas_list_in, pas_ext_union,
342 			       qpc_data_extension.error_syndrome);
343 
344 	pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n",
345 	       ibqp->device->name, ibqp->port, ibqp->qp_num,
346 	       ib_wc_status_msg(
347 		       MLX5_GET(cqe_error_syndrome, err_syn, syndrome)),
348 	       MLX5_GET(cqe_error_syndrome, err_syn, vendor_error_syndrome),
349 	       MLX5_GET(cqe_error_syndrome, err_syn, hw_syndrome_type),
350 	       MLX5_GET(cqe_error_syndrome, err_syn, hw_error_syndrome));
351 out:
352 	kfree(outb);
353 }
354 
355 static void mlx5_ib_handle_qp_event(struct work_struct *_work)
356 {
357 	struct mlx5_ib_qp_event_work *qpe_work =
358 		container_of(_work, struct mlx5_ib_qp_event_work, work);
359 	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
360 	struct ib_event event = {};
361 
362 	event.device = ibqp->device;
363 	event.element.qp = ibqp;
364 	switch (qpe_work->type) {
365 	case MLX5_EVENT_TYPE_PATH_MIG:
366 		event.event = IB_EVENT_PATH_MIG;
367 		break;
368 	case MLX5_EVENT_TYPE_COMM_EST:
369 		event.event = IB_EVENT_COMM_EST;
370 		break;
371 	case MLX5_EVENT_TYPE_SQ_DRAINED:
372 		event.event = IB_EVENT_SQ_DRAINED;
373 		break;
374 	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
375 		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
376 		break;
377 	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
378 		event.event = IB_EVENT_QP_FATAL;
379 		break;
380 	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
381 		event.event = IB_EVENT_PATH_MIG_ERR;
382 		break;
383 	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
384 		event.event = IB_EVENT_QP_REQ_ERR;
385 		break;
386 	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
387 		event.event = IB_EVENT_QP_ACCESS_ERR;
388 		break;
389 	default:
390 		pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n",
391 			qpe_work->type, qpe_work->qp->qpn);
392 		goto out;
393 	}
394 
395 	if ((event.event == IB_EVENT_QP_FATAL) ||
396 	    (event.event == IB_EVENT_QP_ACCESS_ERR))
397 		mlx5_ib_qp_err_syndrome(ibqp);
398 
399 	ibqp->event_handler(&event, ibqp->qp_context);
400 
401 out:
402 	mlx5_core_res_put(&qpe_work->qp->common);
403 	kfree(qpe_work);
404 }
405 
406 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
407 {
408 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
409 	struct mlx5_ib_qp_event_work *qpe_work;
410 
411 	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
412 		/* This event is only valid for trans_qps */
413 		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
414 	}
415 
416 	if (!ibqp->event_handler)
417 		goto out_no_handler;
418 
419 	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
420 	if (!qpe_work)
421 		goto out_no_handler;
422 
423 	qpe_work->qp = qp;
424 	qpe_work->type = type;
425 	INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
426 	queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
427 	return;
428 
429 out_no_handler:
430 	mlx5_core_res_put(&qp->common);
431 }
432 
433 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
434 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
435 {
436 	int wqe_size;
437 	int wq_size;
438 
439 	/* Sanity check RQ size before proceeding */
440 	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
441 		return -EINVAL;
442 
443 	if (!has_rq) {
444 		qp->rq.max_gs = 0;
445 		qp->rq.wqe_cnt = 0;
446 		qp->rq.wqe_shift = 0;
447 		cap->max_recv_wr = 0;
448 		cap->max_recv_sge = 0;
449 	} else {
450 		int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
451 
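		/* an enabled WQ signature occupies one data-segment-sized slot
		 * in each receive WQE, so it is excluded from max_gs below
		 */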
452 		if (ucmd) {
453 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
454 			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
455 				return -EINVAL;
456 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
457 			if ((1 << qp->rq.wqe_shift) /
458 				    sizeof(struct mlx5_wqe_data_seg) <
459 			    wq_sig)
460 				return -EINVAL;
461 			qp->rq.max_gs =
462 				(1 << qp->rq.wqe_shift) /
463 					sizeof(struct mlx5_wqe_data_seg) -
464 				wq_sig;
465 			qp->rq.max_post = qp->rq.wqe_cnt;
466 		} else {
467 			wqe_size =
468 				wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
469 					 0;
470 			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
471 			wqe_size = roundup_pow_of_two(wqe_size);
472 			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
473 			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
474 			qp->rq.wqe_cnt = wq_size / wqe_size;
475 			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
476 				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
477 					    wqe_size,
478 					    MLX5_CAP_GEN(dev->mdev,
479 							 max_wqe_sz_rq));
480 				return -EINVAL;
481 			}
482 			qp->rq.wqe_shift = ilog2(wqe_size);
483 			qp->rq.max_gs =
484 				(1 << qp->rq.wqe_shift) /
485 					sizeof(struct mlx5_wqe_data_seg) -
486 				wq_sig;
487 			qp->rq.max_post = qp->rq.wqe_cnt;
488 		}
489 	}
490 
491 	return 0;
492 }
493 
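/* Worst-case fixed overhead, in bytes, of a single send WQE for the given QP
 * type: the control segment plus the largest transport-specific segment
 * combination, excluding data segments and inline data.
 */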
494 static int sq_overhead(struct ib_qp_init_attr *attr)
495 {
496 	int size = 0;
497 
498 	switch (attr->qp_type) {
499 	case IB_QPT_XRC_INI:
500 		size += sizeof(struct mlx5_wqe_xrc_seg);
501 		fallthrough;
502 	case IB_QPT_RC:
503 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
504 			max(sizeof(struct mlx5_wqe_atomic_seg) +
505 			    sizeof(struct mlx5_wqe_raddr_seg),
506 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
507 			    sizeof(struct mlx5_mkey_seg) +
508 			    MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
509 			    MLX5_IB_UMR_OCTOWORD);
510 		break;
511 
512 	case IB_QPT_XRC_TGT:
513 		return 0;
514 
515 	case IB_QPT_UC:
516 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
517 			max(sizeof(struct mlx5_wqe_raddr_seg),
518 			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
519 			    sizeof(struct mlx5_mkey_seg));
520 		break;
521 
522 	case IB_QPT_UD:
523 		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
524 			size += sizeof(struct mlx5_wqe_eth_pad) +
525 				sizeof(struct mlx5_wqe_eth_seg);
526 		fallthrough;
527 	case IB_QPT_SMI:
528 	case MLX5_IB_QPT_HW_GSI:
529 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
530 			sizeof(struct mlx5_wqe_datagram_seg);
531 		break;
532 
533 	case MLX5_IB_QPT_REG_UMR:
534 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
535 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
536 			sizeof(struct mlx5_mkey_seg);
537 		break;
538 
539 	default:
540 		return -EINVAL;
541 	}
542 
543 	return size;
544 }
545 
546 static int calc_send_wqe(struct ib_qp_init_attr *attr)
547 {
548 	int inl_size = 0;
549 	int size;
550 
551 	size = sq_overhead(attr);
552 	if (size < 0)
553 		return size;
554 
555 	if (attr->cap.max_inline_data) {
556 		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
557 			attr->cap.max_inline_data;
558 	}
559 
560 	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
561 	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
562 	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
563 		return MLX5_SIG_WQE_SIZE;
564 	else
565 		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
566 }
567 
568 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
569 {
570 	int max_sge;
571 
572 	if (attr->qp_type == IB_QPT_RC)
573 		max_sge = (min_t(int, wqe_size, 512) -
574 			   sizeof(struct mlx5_wqe_ctrl_seg) -
575 			   sizeof(struct mlx5_wqe_raddr_seg)) /
576 			sizeof(struct mlx5_wqe_data_seg);
577 	else if (attr->qp_type == IB_QPT_XRC_INI)
578 		max_sge = (min_t(int, wqe_size, 512) -
579 			   sizeof(struct mlx5_wqe_ctrl_seg) -
580 			   sizeof(struct mlx5_wqe_xrc_seg) -
581 			   sizeof(struct mlx5_wqe_raddr_seg)) /
582 			sizeof(struct mlx5_wqe_data_seg);
583 	else
584 		max_sge = (wqe_size - sq_overhead(attr)) /
585 			sizeof(struct mlx5_wqe_data_seg);
586 
587 	return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
588 		     sizeof(struct mlx5_wqe_data_seg));
589 }
590 
591 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
592 			struct mlx5_ib_qp *qp)
593 {
594 	int wqe_size;
595 	int wq_size;
596 
597 	if (!attr->cap.max_send_wr)
598 		return 0;
599 
600 	wqe_size = calc_send_wqe(attr);
601 	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
602 	if (wqe_size < 0)
603 		return wqe_size;
604 
605 	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
606 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
607 			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
608 		return -EINVAL;
609 	}
610 
611 	qp->max_inline_data = wqe_size - sq_overhead(attr) -
612 			      sizeof(struct mlx5_wqe_inline_seg);
613 	attr->cap.max_inline_data = qp->max_inline_data;
614 
615 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
616 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
617 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
618 		mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
619 			    attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
620 			    qp->sq.wqe_cnt,
621 			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
622 		return -ENOMEM;
623 	}
624 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
625 	qp->sq.max_gs = get_send_sge(attr, wqe_size);
626 	if (qp->sq.max_gs < attr->cap.max_send_sge)
627 		return -ENOMEM;
628 
629 	attr->cap.max_send_sge = qp->sq.max_gs;
630 	qp->sq.max_post = wq_size / wqe_size;
631 	attr->cap.max_send_wr = qp->sq.max_post;
632 
633 	return wq_size;
634 }
635 
636 static int set_user_buf_size(struct mlx5_ib_dev *dev,
637 			    struct mlx5_ib_qp *qp,
638 			    struct mlx5_ib_create_qp *ucmd,
639 			    struct mlx5_ib_qp_base *base,
640 			    struct ib_qp_init_attr *attr)
641 {
642 	int desc_sz = 1 << qp->sq.wqe_shift;
643 
644 	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
645 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
646 			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
647 		return -EINVAL;
648 	}
649 
650 	if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
651 		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
652 			     ucmd->sq_wqe_count);
653 		return -EINVAL;
654 	}
655 
656 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
657 
658 	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
659 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
660 			     qp->sq.wqe_cnt,
661 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
662 		return -EINVAL;
663 	}
664 
665 	if (attr->qp_type == IB_QPT_RAW_PACKET ||
666 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
667 		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
668 		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
669 	} else {
670 		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
671 					 (qp->sq.wqe_cnt << 6);
672 	}
673 
674 	return 0;
675 }
676 
677 static int qp_has_rq(struct ib_qp_init_attr *attr)
678 {
679 	if (attr->qp_type == IB_QPT_XRC_INI ||
680 	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
681 	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
682 	    !attr->cap.max_recv_wr)
683 		return 0;
684 
685 	return 1;
686 }
687 
688 enum {
689 	/* this is the first blue flame register in the array of bfregs assigned
690 	 * to a process. Since we do not use it for blue flame but rather
691 	 * regular 64 bit doorbells, we do not need a lock for maintaining
692 	 * "odd/even" order
693 	 */
694 	NUM_NON_BLUE_FLAME_BFREGS = 1,
695 };
696 
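/* bfreg index layout per context: index 0 is the shared non-blue-flame
 * register used for regular doorbells, the medium-class bfregs that follow
 * are shared on a least-loaded basis, and the remaining high-class bfregs
 * are handed out to a single user each.
 */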
697 static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
698 {
699 	return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
700 	       bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR;
701 }
702 
703 static int num_med_bfreg(struct mlx5_ib_dev *dev,
704 			 struct mlx5_bfreg_info *bfregi)
705 {
706 	int n;
707 
708 	n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
709 	    NUM_NON_BLUE_FLAME_BFREGS;
710 
711 	return n >= 0 ? n : 0;
712 }
713 
714 static int first_med_bfreg(struct mlx5_ib_dev *dev,
715 			   struct mlx5_bfreg_info *bfregi)
716 {
717 	return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
718 }
719 
720 static int first_hi_bfreg(struct mlx5_ib_dev *dev,
721 			  struct mlx5_bfreg_info *bfregi)
722 {
723 	int med;
724 
725 	med = num_med_bfreg(dev, bfregi);
726 	return ++med;
727 }
728 
729 static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
730 				  struct mlx5_bfreg_info *bfregi)
731 {
732 	int i;
733 
734 	for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
735 		if (!bfregi->count[i]) {
736 			bfregi->count[i]++;
737 			return i;
738 		}
739 	}
740 
741 	return -ENOMEM;
742 }
743 
744 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
745 				 struct mlx5_bfreg_info *bfregi)
746 {
747 	int minidx = first_med_bfreg(dev, bfregi);
748 	int i;
749 
750 	if (minidx < 0)
751 		return minidx;
752 
753 	for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
754 		if (bfregi->count[i] < bfregi->count[minidx])
755 			minidx = i;
756 		if (!bfregi->count[minidx])
757 			break;
758 	}
759 
760 	bfregi->count[minidx]++;
761 	return minidx;
762 }
763 
764 static int alloc_bfreg(struct mlx5_ib_dev *dev,
765 		       struct mlx5_bfreg_info *bfregi)
766 {
767 	int bfregn = -ENOMEM;
768 
769 	if (bfregi->lib_uar_dyn)
770 		return -EINVAL;
771 
772 	mutex_lock(&bfregi->lock);
773 	if (bfregi->ver >= 2) {
774 		bfregn = alloc_high_class_bfreg(dev, bfregi);
775 		if (bfregn < 0)
776 			bfregn = alloc_med_class_bfreg(dev, bfregi);
777 	}
778 
779 	if (bfregn < 0) {
780 		BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
781 		bfregn = 0;
782 		bfregi->count[bfregn]++;
783 	}
784 	mutex_unlock(&bfregi->lock);
785 
786 	return bfregn;
787 }
788 
789 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
790 {
791 	mutex_lock(&bfregi->lock);
792 	bfregi->count[bfregn]--;
793 	mutex_unlock(&bfregi->lock);
794 }
795 
796 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
797 {
798 	switch (state) {
799 	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
800 	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
801 	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
802 	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
803 	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
804 	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
805 	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
806 	default:		return -1;
807 	}
808 }
809 
810 static int to_mlx5_st(enum ib_qp_type type)
811 {
812 	switch (type) {
813 	case IB_QPT_RC:			return MLX5_QP_ST_RC;
814 	case IB_QPT_UC:			return MLX5_QP_ST_UC;
815 	case IB_QPT_UD:			return MLX5_QP_ST_UD;
816 	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
817 	case IB_QPT_XRC_INI:
818 	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
819 	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
820 	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
821 	case MLX5_IB_QPT_DCI:		return MLX5_QP_ST_DCI;
822 	case IB_QPT_RAW_PACKET:		return MLX5_QP_ST_RAW_ETHERTYPE;
823 	default:		return -EINVAL;
824 	}
825 }
826 
827 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
828 			     struct mlx5_ib_cq *recv_cq);
829 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
830 			       struct mlx5_ib_cq *recv_cq);
831 
832 int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
833 			struct mlx5_bfreg_info *bfregi, u32 bfregn,
834 			bool dyn_bfreg)
835 {
836 	unsigned int bfregs_per_sys_page;
837 	u32 index_of_sys_page;
838 	u32 offset;
839 
840 	if (bfregi->lib_uar_dyn)
841 		return -EINVAL;
842 
843 	bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
844 				MLX5_NON_FP_BFREGS_PER_UAR;
845 	index_of_sys_page = bfregn / bfregs_per_sys_page;
846 
847 	if (dyn_bfreg) {
848 		index_of_sys_page += bfregi->num_static_sys_pages;
849 
850 		if (index_of_sys_page >= bfregi->num_sys_pages)
851 			return -EINVAL;
852 
853 		if (bfregn > bfregi->num_dyn_bfregs ||
854 		    bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
855 			mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
856 			return -EINVAL;
857 		}
858 	}
859 
860 	offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
861 	return bfregi->sys_pages[index_of_sys_page] + offset;
862 }
863 
864 static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
865 			    struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
866 {
867 	struct mlx5_ib_ucontext *context =
868 		rdma_udata_to_drv_context(
869 			udata,
870 			struct mlx5_ib_ucontext,
871 			ibucontext);
872 
873 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
874 		atomic_dec(&dev->delay_drop.rqs_cnt);
875 
876 	mlx5_ib_db_unmap_user(context, &rwq->db);
877 	ib_umem_release(rwq->umem);
878 }
879 
880 static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
881 			  struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
882 			  struct mlx5_ib_create_wq *ucmd)
883 {
884 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
885 		udata, struct mlx5_ib_ucontext, ibucontext);
886 	unsigned long page_size = 0;
887 	u32 offset = 0;
888 	int err;
889 
890 	if (!ucmd->buf_addr)
891 		return -EINVAL;
892 
893 	rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
894 	if (IS_ERR(rwq->umem)) {
895 		mlx5_ib_dbg(dev, "umem_get failed\n");
896 		err = PTR_ERR(rwq->umem);
897 		return err;
898 	}
899 
900 	page_size = mlx5_umem_find_best_quantized_pgoff(
901 		rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
902 		page_offset, 64, &rwq->rq_page_offset);
903 	if (!page_size) {
904 		mlx5_ib_warn(dev, "bad offset\n");
905 		err = -EINVAL;
906 		goto err_umem;
907 	}
908 
909 	rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
910 	rwq->page_shift = order_base_2(page_size);
911 	rwq->log_page_size =  rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
912 	rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
913 
914 	mlx5_ib_dbg(
915 		dev,
916 		"addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n",
917 		(unsigned long long)ucmd->buf_addr, rwq->buf_size,
918 		ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
919 		offset);
920 
921 	err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
922 	if (err) {
923 		mlx5_ib_dbg(dev, "map failed\n");
924 		goto err_umem;
925 	}
926 
927 	return 0;
928 
929 err_umem:
930 	ib_umem_release(rwq->umem);
931 	return err;
932 }
933 
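/* Translate a bfreg index counted over non-fast-path bfregs only into the
 * user-visible index, which also counts the fast-path bfregs of every UAR.
 */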
934 static int adjust_bfregn(struct mlx5_ib_dev *dev,
935 			 struct mlx5_bfreg_info *bfregi, int bfregn)
936 {
937 	return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
938 				bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
939 }
940 
941 static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
942 			   struct mlx5_ib_qp *qp, struct ib_udata *udata,
943 			   struct ib_qp_init_attr *attr, u32 **in,
944 			   struct mlx5_ib_create_qp_resp *resp, int *inlen,
945 			   struct mlx5_ib_qp_base *base,
946 			   struct mlx5_ib_create_qp *ucmd)
947 {
948 	struct mlx5_ib_ucontext *context;
949 	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
950 	unsigned int page_offset_quantized = 0;
951 	unsigned long page_size = 0;
952 	int uar_index = 0;
953 	int bfregn;
954 	int ncont = 0;
955 	__be64 *pas;
956 	void *qpc;
957 	int err;
958 	u16 uid;
959 	u32 uar_flags;
960 
961 	context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
962 					    ibucontext);
963 	uar_flags = qp->flags_en &
964 		    (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
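	/* userspace either passes a UAR page index directly, passes a
	 * dynamically allocated bfreg index, or lets the kernel allocate a
	 * bfreg from the context's static range
	 */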
965 	switch (uar_flags) {
966 	case MLX5_QP_FLAG_UAR_PAGE_INDEX:
967 		uar_index = ucmd->bfreg_index;
968 		bfregn = MLX5_IB_INVALID_BFREG;
969 		break;
970 	case MLX5_QP_FLAG_BFREG_INDEX:
971 		uar_index = bfregn_to_uar_index(dev, &context->bfregi,
972 						ucmd->bfreg_index, true);
973 		if (uar_index < 0)
974 			return uar_index;
975 		bfregn = MLX5_IB_INVALID_BFREG;
976 		break;
977 	case 0:
978 		if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
979 			return -EINVAL;
980 		bfregn = alloc_bfreg(dev, &context->bfregi);
981 		if (bfregn < 0)
982 			return bfregn;
983 		break;
984 	default:
985 		return -EINVAL;
986 	}
987 
988 	mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
989 	if (bfregn != MLX5_IB_INVALID_BFREG)
990 		uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
991 						false);
992 
993 	qp->rq.offset = 0;
994 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
995 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
996 
997 	err = set_user_buf_size(dev, qp, ucmd, base, attr);
998 	if (err)
999 		goto err_bfreg;
1000 
1001 	if (ucmd->buf_addr && ubuffer->buf_size) {
1002 		ubuffer->buf_addr = ucmd->buf_addr;
1003 		ubuffer->umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
1004 					    ubuffer->buf_size, 0);
1005 		if (IS_ERR(ubuffer->umem)) {
1006 			err = PTR_ERR(ubuffer->umem);
1007 			goto err_bfreg;
1008 		}
1009 		page_size = mlx5_umem_find_best_quantized_pgoff(
1010 			ubuffer->umem, qpc, log_page_size,
1011 			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
1012 			&page_offset_quantized);
1013 		if (!page_size) {
1014 			err = -EINVAL;
1015 			goto err_umem;
1016 		}
1017 		ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size);
1018 	} else {
1019 		ubuffer->umem = NULL;
1020 	}
1021 
1022 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
1023 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
1024 	*in = kvzalloc(*inlen, GFP_KERNEL);
1025 	if (!*in) {
1026 		err = -ENOMEM;
1027 		goto err_umem;
1028 	}
1029 
1030 	uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
1031 	MLX5_SET(create_qp_in, *in, uid, uid);
1032 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
1033 	pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
1034 	if (ubuffer->umem) {
1035 		mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0);
1036 		MLX5_SET(qpc, qpc, log_page_size,
1037 			 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
1038 		MLX5_SET(qpc, qpc, page_offset, page_offset_quantized);
1039 	}
1040 	MLX5_SET(qpc, qpc, uar_page, uar_index);
1041 	if (bfregn != MLX5_IB_INVALID_BFREG)
1042 		resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
1043 	else
1044 		resp->bfreg_index = MLX5_IB_INVALID_BFREG;
1045 	qp->bfregn = bfregn;
1046 
1047 	err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
1048 	if (err) {
1049 		mlx5_ib_dbg(dev, "map failed\n");
1050 		goto err_free;
1051 	}
1052 
1053 	return 0;
1054 
1055 err_free:
1056 	kvfree(*in);
1057 
1058 err_umem:
1059 	ib_umem_release(ubuffer->umem);
1060 
1061 err_bfreg:
1062 	if (bfregn != MLX5_IB_INVALID_BFREG)
1063 		mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
1064 	return err;
1065 }
1066 
1067 static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1068 		       struct mlx5_ib_qp_base *base, struct ib_udata *udata)
1069 {
1070 	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
1071 		udata, struct mlx5_ib_ucontext, ibucontext);
1072 
1073 	if (udata) {
1074 		/* User QP */
1075 		mlx5_ib_db_unmap_user(context, &qp->db);
1076 		ib_umem_release(base->ubuffer.umem);
1077 
1078 		/*
1079 		 * Free only the BFREGs which are handled by the kernel.
1080 		 * BFREGs of UARs allocated dynamically are handled by userspace.
1081 		 */
1082 		if (qp->bfregn != MLX5_IB_INVALID_BFREG)
1083 			mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
1084 		return;
1085 	}
1086 
1087 	/* Kernel QP */
1088 	kvfree(qp->sq.wqe_head);
1089 	kvfree(qp->sq.w_list);
1090 	kvfree(qp->sq.wrid);
1091 	kvfree(qp->sq.wr_data);
1092 	kvfree(qp->rq.wrid);
1093 	if (qp->db.db)
1094 		mlx5_db_free(dev->mdev, &qp->db);
1095 	if (qp->buf.frags)
1096 		mlx5_frag_buf_free(dev->mdev, &qp->buf);
1097 }
1098 
1099 static int _create_kernel_qp(struct mlx5_ib_dev *dev,
1100 			     struct ib_qp_init_attr *init_attr,
1101 			     struct mlx5_ib_qp *qp, u32 **in, int *inlen,
1102 			     struct mlx5_ib_qp_base *base)
1103 {
1104 	int uar_index;
1105 	void *qpc;
1106 	int err;
1107 
1108 	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
1109 		qp->bf.bfreg = &dev->fp_bfreg;
1110 	else
1111 		qp->bf.bfreg = &dev->bfreg;
1112 
1113 	/* We need to divide by two since each register comprises
1114 	 * two buffers of identical size, namely odd and even
1115 	 */
1116 	qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
1117 	uar_index = qp->bf.bfreg->index;
1118 
1119 	err = calc_sq_size(dev, init_attr, qp);
1120 	if (err < 0) {
1121 		mlx5_ib_dbg(dev, "err %d\n", err);
1122 		return err;
1123 	}
1124 
1125 	qp->rq.offset = 0;
1126 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
1127 	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
1128 
1129 	err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
1130 				       &qp->buf, dev->mdev->priv.numa_node);
1131 	if (err) {
1132 		mlx5_ib_dbg(dev, "err %d\n", err);
1133 		return err;
1134 	}
1135 
1136 	if (qp->rq.wqe_cnt)
1137 		mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
1138 			      ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
1139 
1140 	if (qp->sq.wqe_cnt) {
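		/* the SQ need not start on a page boundary; pass the offset of
		 * its first basic block within the page to the fragment buffer
		 * control
		 */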
1141 		int sq_strides_offset = (qp->sq.offset  & (PAGE_SIZE - 1)) /
1142 					MLX5_SEND_WQE_BB;
1143 		mlx5_init_fbc_offset(qp->buf.frags +
1144 				     (qp->sq.offset / PAGE_SIZE),
1145 				     ilog2(MLX5_SEND_WQE_BB),
1146 				     ilog2(qp->sq.wqe_cnt),
1147 				     sq_strides_offset, &qp->sq.fbc);
1148 
1149 		qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1150 	}
1151 
1152 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
1153 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
1154 	*in = kvzalloc(*inlen, GFP_KERNEL);
1155 	if (!*in) {
1156 		err = -ENOMEM;
1157 		goto err_buf;
1158 	}
1159 
1160 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
1161 	MLX5_SET(qpc, qpc, uar_page, uar_index);
1162 	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
1163 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1164 
1165 	/* Set "fast registration enabled" for all kernel QPs */
1166 	MLX5_SET(qpc, qpc, fre, 1);
1167 	MLX5_SET(qpc, qpc, rlky, 1);
1168 
1169 	if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
1170 		MLX5_SET(qpc, qpc, deth_sqpn, 1);
1171 
1172 	mlx5_fill_page_frag_array(&qp->buf,
1173 				  (__be64 *)MLX5_ADDR_OF(create_qp_in,
1174 							 *in, pas));
1175 
1176 	err = mlx5_db_alloc(dev->mdev, &qp->db);
1177 	if (err) {
1178 		mlx5_ib_dbg(dev, "err %d\n", err);
1179 		goto err_free;
1180 	}
1181 
1182 	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1183 				     sizeof(*qp->sq.wrid), GFP_KERNEL);
1184 	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
1185 					sizeof(*qp->sq.wr_data), GFP_KERNEL);
1186 	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1187 				     sizeof(*qp->rq.wrid), GFP_KERNEL);
1188 	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
1189 				       sizeof(*qp->sq.w_list), GFP_KERNEL);
1190 	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
1191 					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);
1192 
1193 	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
1194 	    !qp->sq.w_list || !qp->sq.wqe_head) {
1195 		err = -ENOMEM;
1196 		goto err_wrid;
1197 	}
1198 
1199 	return 0;
1200 
1201 err_wrid:
1202 	kvfree(qp->sq.wqe_head);
1203 	kvfree(qp->sq.w_list);
1204 	kvfree(qp->sq.wrid);
1205 	kvfree(qp->sq.wr_data);
1206 	kvfree(qp->rq.wrid);
1207 	mlx5_db_free(dev->mdev, &qp->db);
1208 
1209 err_free:
1210 	kvfree(*in);
1211 
1212 err_buf:
1213 	mlx5_frag_buf_free(dev->mdev, &qp->buf);
1214 	return err;
1215 }
1216 
1217 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1218 {
1219 	if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
1220 	    (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
1221 		return MLX5_SRQ_RQ;
1222 	else if (!qp->has_rq)
1223 		return MLX5_ZERO_LEN_RQ;
1224 
1225 	return MLX5_NON_ZERO_RQ;
1226 }
1227 
1228 static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1229 				    struct mlx5_ib_qp *qp,
1230 				    struct mlx5_ib_sq *sq, u32 tdn,
1231 				    struct ib_pd *pd)
1232 {
1233 	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
1234 	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1235 
1236 	MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
1237 	MLX5_SET(tisc, tisc, transport_domain, tdn);
1238 	if (!mlx5_ib_lag_should_assign_affinity(dev) &&
1239 	    mlx5_lag_is_lacp_owner(dev->mdev))
1240 		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
1241 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
1242 		MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
1243 
1244 	return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
1245 }
1246 
1247 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
1248 				      struct mlx5_ib_sq *sq, struct ib_pd *pd)
1249 {
1250 	mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
1251 }
1252 
1253 static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
1254 {
1255 	if (sq->flow_rule)
1256 		mlx5_del_flow_rules(sq->flow_rule);
1257 	sq->flow_rule = NULL;
1258 }
1259 
1260 static bool fr_supported(int ts_cap)
1261 {
1262 	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
1263 	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
1264 }
1265 
1266 static int get_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1267 			 bool fr_sup, bool rt_sup)
1268 {
1269 	if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) {
1270 		if (!rt_sup) {
1271 			mlx5_ib_dbg(dev,
1272 				    "Real time TS format is not supported\n");
1273 			return -EOPNOTSUPP;
1274 		}
1275 		return MLX5_TIMESTAMP_FORMAT_REAL_TIME;
1276 	}
1277 	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
1278 		if (!fr_sup) {
1279 			mlx5_ib_dbg(dev,
1280 				    "Free running TS format is not supported\n");
1281 			return -EOPNOTSUPP;
1282 		}
1283 		return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
1284 	}
1285 	return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
1286 			MLX5_TIMESTAMP_FORMAT_DEFAULT;
1287 }
1288 
1289 static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
1290 {
1291 	u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
1292 
1293 	return get_ts_format(dev, recv_cq, fr_supported(ts_cap),
1294 			     rt_supported(ts_cap));
1295 }
1296 
1297 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
1298 {
1299 	u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
1300 
1301 	return get_ts_format(dev, send_cq, fr_supported(ts_cap),
1302 			     rt_supported(ts_cap));
1303 }
1304 
1305 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
1306 			    struct mlx5_ib_cq *recv_cq)
1307 {
1308 	u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
1309 	bool fr_sup = fr_supported(ts_cap);
1310 	bool rt_sup = rt_supported(ts_cap);
1311 	u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
1312 				 MLX5_TIMESTAMP_FORMAT_DEFAULT;
1313 	int send_ts_format =
1314 		send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :
1315 			  default_ts;
1316 	int recv_ts_format =
1317 		recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :
1318 			  default_ts;
1319 
1320 	if (send_ts_format < 0 || recv_ts_format < 0)
1321 		return -EOPNOTSUPP;
1322 
1323 	if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
1324 	    recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
1325 	    send_ts_format != recv_ts_format) {
1326 		mlx5_ib_dbg(
1327 			dev,
1328 			"The send ts_format does not match the receive ts_format\n");
1329 		return -EOPNOTSUPP;
1330 	}
1331 
1332 	return send_ts_format == default_ts ? recv_ts_format : send_ts_format;
1333 }
1334 
1335 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1336 				   struct ib_udata *udata,
1337 				   struct mlx5_ib_sq *sq, void *qpin,
1338 				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
1339 {
1340 	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
1341 	__be64 *pas;
1342 	void *in;
1343 	void *sqc;
1344 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1345 	void *wq;
1346 	int inlen;
1347 	int err;
1348 	unsigned int page_offset_quantized;
1349 	unsigned long page_size;
1350 	int ts_format;
1351 
1352 	ts_format = get_sq_ts_format(dev, cq);
1353 	if (ts_format < 0)
1354 		return ts_format;
1355 
1356 	sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
1357 				       ubuffer->buf_size, 0);
1358 	if (IS_ERR(sq->ubuffer.umem))
1359 		return PTR_ERR(sq->ubuffer.umem);
1360 	page_size = mlx5_umem_find_best_quantized_pgoff(
1361 		ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
1362 		page_offset, 64, &page_offset_quantized);
1363 	if (!page_size) {
1364 		err = -EINVAL;
1365 		goto err_umem;
1366 	}
1367 
1368 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
1369 		sizeof(u64) *
1370 			ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);
1371 	in = kvzalloc(inlen, GFP_KERNEL);
1372 	if (!in) {
1373 		err = -ENOMEM;
1374 		goto err_umem;
1375 	}
1376 
1377 	MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
1378 	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
1379 	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
1380 	if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
1381 		MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
1382 	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
1383 	MLX5_SET(sqc, sqc, ts_format, ts_format);
1384 	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
1385 	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
1386 	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
1387 	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
1388 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
1389 	    MLX5_CAP_ETH(dev->mdev, swp))
1390 		MLX5_SET(sqc, sqc, allow_swp, 1);
1391 
1392 	wq = MLX5_ADDR_OF(sqc, sqc, wq);
1393 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1394 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1395 	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
1396 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1397 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1398 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
1399 	MLX5_SET(wq, wq, log_wq_pg_sz,
1400 		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
1401 	MLX5_SET(wq, wq, page_offset, page_offset_quantized);
1402 
1403 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1404 	mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0);
1405 
1406 	err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
1407 
1408 	kvfree(in);
1409 
1410 	if (err)
1411 		goto err_umem;
1412 
1413 	return 0;
1414 
1415 err_umem:
1416 	ib_umem_release(sq->ubuffer.umem);
1417 	sq->ubuffer.umem = NULL;
1418 
1419 	return err;
1420 }
1421 
1422 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1423 				     struct mlx5_ib_sq *sq)
1424 {
1425 	destroy_flow_rule_vport_sq(sq);
1426 	mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
1427 	ib_umem_release(sq->ubuffer.umem);
1428 }
1429 
1430 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1431 				   struct mlx5_ib_rq *rq, void *qpin,
1432 				   struct ib_pd *pd, struct mlx5_ib_cq *cq)
1433 {
1434 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1435 	__be64 *pas;
1436 	void *in;
1437 	void *rqc;
1438 	void *wq;
1439 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1440 	struct ib_umem *umem = rq->base.ubuffer.umem;
1441 	unsigned int page_offset_quantized;
1442 	unsigned long page_size = 0;
1443 	int ts_format;
1444 	size_t inlen;
1445 	int err;
1446 
1447 	ts_format = get_rq_ts_format(dev, cq);
1448 	if (ts_format < 0)
1449 		return ts_format;
1450 
1451 	page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
1452 							MLX5_ADAPTER_PAGE_SHIFT,
1453 							page_offset, 64,
1454 							&page_offset_quantized);
1455 	if (!page_size)
1456 		return -EINVAL;
1457 
1458 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
1459 		sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);
1460 	in = kvzalloc(inlen, GFP_KERNEL);
1461 	if (!in)
1462 		return -ENOMEM;
1463 
1464 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
1465 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
1466 	if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
1467 		MLX5_SET(rqc, rqc, vsd, 1);
1468 	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
1469 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
1470 	MLX5_SET(rqc, rqc, ts_format, ts_format);
1471 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
1472 	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
1473 	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
1474 
1475 	if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
1476 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
1477 
1478 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
1479 	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
1480 	if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
1481 		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1482 	MLX5_SET(wq, wq, page_offset, page_offset_quantized);
1483 	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
1484 	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
1485 	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
1486 	MLX5_SET(wq, wq, log_wq_pg_sz,
1487 		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
1488 	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
1489 
1490 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
1491 	mlx5_ib_populate_pas(umem, page_size, pas, 0);
1492 
1493 	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
1494 
1495 	kvfree(in);
1496 
1497 	return err;
1498 }
1499 
1500 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1501 				     struct mlx5_ib_rq *rq)
1502 {
1503 	mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
1504 }
1505 
1506 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1507 				      struct mlx5_ib_rq *rq,
1508 				      u32 qp_flags_en,
1509 				      struct ib_pd *pd)
1510 {
1511 	if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1512 			   MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1513 		mlx5_ib_disable_lb(dev, false, true);
1514 	mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
1515 }
1516 
1517 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1518 				    struct mlx5_ib_rq *rq, u32 tdn,
1519 				    u32 *qp_flags_en, struct ib_pd *pd,
1520 				    u32 *out)
1521 {
1522 	u8 lb_flag = 0;
1523 	u32 *in;
1524 	void *tirc;
1525 	int inlen;
1526 	int err;
1527 
1528 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1529 	in = kvzalloc(inlen, GFP_KERNEL);
1530 	if (!in)
1531 		return -ENOMEM;
1532 
1533 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1534 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1535 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
1536 	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
1537 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1538 	if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1539 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1540 
1541 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1542 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1543 
1544 	if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1545 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1546 
1547 	if (dev->is_rep) {
1548 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1549 		*qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1550 	}
1551 
1552 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1553 	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
1554 	err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
1555 	rq->tirn = MLX5_GET(create_tir_out, out, tirn);
1556 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1557 		err = mlx5_ib_enable_lb(dev, false, true);
1558 
1559 		if (err)
1560 			destroy_raw_packet_qp_tir(dev, rq, 0, pd);
1561 	}
1562 	kvfree(in);
1563 
1564 	return err;
1565 }
1566 
1567 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1568 				u32 *in, size_t inlen, struct ib_pd *pd,
1569 				struct ib_udata *udata,
1570 				struct mlx5_ib_create_qp_resp *resp,
1571 				struct ib_qp_init_attr *init_attr)
1572 {
1573 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1574 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1575 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1576 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1577 		udata, struct mlx5_ib_ucontext, ibucontext);
1578 	int err;
1579 	u32 tdn = mucontext->tdn;
1580 	u16 uid = to_mpd(pd)->uid;
1581 	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
1582 
1583 	if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
1584 		return -EINVAL;
1585 	if (qp->sq.wqe_cnt) {
1586 		err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
1587 		if (err)
1588 			return err;
1589 
1590 		err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
1591 					      to_mcq(init_attr->send_cq));
1592 		if (err)
1593 			goto err_destroy_tis;
1594 
1595 		if (uid) {
1596 			resp->tisn = sq->tisn;
1597 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
1598 			resp->sqn = sq->base.mqp.qpn;
1599 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
1600 		}
1601 
1602 		sq->base.container_mibqp = qp;
1603 		sq->base.mqp.event = mlx5_ib_qp_event;
1604 	}
1605 
1606 	if (qp->rq.wqe_cnt) {
1607 		rq->base.container_mibqp = qp;
1608 
1609 		if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
1610 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
1611 		if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
1612 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1613 		err = create_raw_packet_qp_rq(dev, rq, in, pd,
1614 					      to_mcq(init_attr->recv_cq));
1615 		if (err)
1616 			goto err_destroy_sq;
1617 
1618 		err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
1619 					       out);
1620 		if (err)
1621 			goto err_destroy_rq;
1622 
1623 		if (uid) {
1624 			resp->rqn = rq->base.mqp.qpn;
1625 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
1626 			resp->tirn = rq->tirn;
1627 			resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1628 			if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
1629 			    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
1630 				resp->tir_icm_addr = MLX5_GET(
1631 					create_tir_out, out, icm_address_31_0);
1632 				resp->tir_icm_addr |=
1633 					(u64)MLX5_GET(create_tir_out, out,
1634 						      icm_address_39_32)
1635 					<< 32;
1636 				resp->tir_icm_addr |=
1637 					(u64)MLX5_GET(create_tir_out, out,
1638 						      icm_address_63_40)
1639 					<< 40;
1640 				resp->comp_mask |=
1641 					MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1642 			}
1643 		}
1644 	}
1645 
1646 	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
1647 						     rq->base.mqp.qpn;
1648 	return 0;
1649 
1650 err_destroy_rq:
1651 	destroy_raw_packet_qp_rq(dev, rq);
1652 err_destroy_sq:
1653 	if (!qp->sq.wqe_cnt)
1654 		return err;
1655 	destroy_raw_packet_qp_sq(dev, sq);
1656 err_destroy_tis:
1657 	destroy_raw_packet_qp_tis(dev, sq, pd);
1658 
1659 	return err;
1660 }
1661 
1662 static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
1663 				  struct mlx5_ib_qp *qp)
1664 {
1665 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
1666 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1667 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1668 
1669 	if (qp->rq.wqe_cnt) {
1670 		destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
1671 		destroy_raw_packet_qp_rq(dev, rq);
1672 	}
1673 
1674 	if (qp->sq.wqe_cnt) {
1675 		destroy_raw_packet_qp_sq(dev, sq);
1676 		destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
1677 	}
1678 }
1679 
1680 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
1681 				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
1682 {
1683 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
1684 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
1685 
1686 	sq->sq = &qp->sq;
1687 	rq->rq = &qp->rq;
1688 	sq->doorbell = &qp->db;
1689 	rq->doorbell = &qp->db;
1690 }
1691 
1692 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1693 {
1694 	if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
1695 			    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
1696 		mlx5_ib_disable_lb(dev, false, true);
1697 	mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1698 			     to_mpd(qp->ibqp.pd)->uid);
1699 }
1700 
1701 struct mlx5_create_qp_params {
1702 	struct ib_udata *udata;
1703 	size_t inlen;
1704 	size_t outlen;
1705 	size_t ucmd_size;
1706 	void *ucmd;
1707 	u8 is_rss_raw : 1;
1708 	struct ib_qp_init_attr *attr;
1709 	u32 uidx;
1710 	struct mlx5_ib_create_qp_resp resp;
1711 };
1712 
1713 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1714 				 struct mlx5_ib_qp *qp,
1715 				 struct mlx5_create_qp_params *params)
1716 {
1717 	struct ib_qp_init_attr *init_attr = params->attr;
1718 	struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
1719 	struct ib_udata *udata = params->udata;
1720 	struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
1721 		udata, struct mlx5_ib_ucontext, ibucontext);
1722 	int inlen;
1723 	int outlen;
1724 	int err;
1725 	u32 *in;
1726 	u32 *out;
1727 	void *tirc;
1728 	void *hfso;
1729 	u32 selected_fields = 0;
1730 	u32 outer_l4;
1731 	u32 tdn = mucontext->tdn;
1732 	u8 lb_flag = 0;
1733 
1734 	if (ucmd->comp_mask) {
1735 		mlx5_ib_dbg(dev, "invalid comp mask\n");
1736 		return -EOPNOTSUPP;
1737 	}
1738 
1739 	if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
1740 	    !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
1741 		mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
1742 		return -EOPNOTSUPP;
1743 	}
1744 
1745 	if (dev->is_rep)
1746 		qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
1747 
1748 	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
1749 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
1750 
1751 	if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
1752 		lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
1753 
1754 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1755 	outlen = MLX5_ST_SZ_BYTES(create_tir_out);
1756 	in = kvzalloc(inlen + outlen, GFP_KERNEL);
1757 	if (!in)
1758 		return -ENOMEM;
1759 
1760 	out = in + MLX5_ST_SZ_DW(create_tir_in);
1761 	MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
1762 	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1763 	MLX5_SET(tirc, tirc, disp_type,
1764 		 MLX5_TIRC_DISP_TYPE_INDIRECT);
1765 	MLX5_SET(tirc, tirc, indirect_table,
1766 		 init_attr->rwq_ind_tbl->ind_tbl_num);
1767 	MLX5_SET(tirc, tirc, transport_domain, tdn);
1768 
1769 	hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1770 
1771 	if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
1772 		MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
1773 
1774 	MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
1775 
1776 	if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
1777 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
1778 	else
1779 		hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
1780 
1781 	switch (ucmd->rx_hash_function) {
1782 	case MLX5_RX_HASH_FUNC_TOEPLITZ:
1783 	{
1784 		void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
1785 		size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
1786 
1787 		if (len != ucmd->rx_key_len) {
1788 			err = -EINVAL;
1789 			goto err;
1790 		}
1791 
1792 		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
1793 		memcpy(rss_key, ucmd->rx_hash_key, len);
1794 		break;
1795 	}
1796 	default:
1797 		err = -EOPNOTSUPP;
1798 		goto err;
1799 	}
1800 
1801 	if (!ucmd->rx_hash_fields_mask) {
1802 		/* special case when this TIR serves as a steering entry without hashing */
1803 		if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
1804 			goto create_tir;
1805 		err = -EINVAL;
1806 		goto err;
1807 	}
1808 
1809 	if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1810 	     (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
1811 	     ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1812 	     (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
1813 		err = -EINVAL;
1814 		goto err;
1815 	}
1816 
1817 	/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
1818 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1819 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
1820 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1821 			 MLX5_L3_PROT_TYPE_IPV4);
1822 	else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
1823 		 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1824 		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
1825 			 MLX5_L3_PROT_TYPE_IPV6);
1826 
1827 	outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1828 		    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1829 			   << 0 |
1830 		   ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1831 		    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1832 			   << 1 |
1833 		   (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
1834 
1835 	/* Check that only one l4 protocol is set */
1836 	if (outer_l4 & (outer_l4 - 1)) {
1837 		err = -EINVAL;
1838 		goto err;
1839 	}
1840 
1841 	/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
1842 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1843 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
1844 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1845 			 MLX5_L4_PROT_TYPE_TCP);
1846 	else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
1847 		 (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1848 		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
1849 			 MLX5_L4_PROT_TYPE_UDP);
1850 
1851 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
1852 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
1853 		selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
1854 
1855 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
1856 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
1857 		selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
1858 
1859 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
1860 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
1861 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
1862 
1863 	if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
1864 	    (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
1865 		selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
1866 
1867 	if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
1868 		selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
1869 
1870 	MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
1871 
1872 create_tir:
1873 	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
1874 	err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
1875 
1876 	qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
1877 	if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
1878 		err = mlx5_ib_enable_lb(dev, false, true);
1879 
1880 		if (err)
1881 			mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
1882 					     to_mpd(pd)->uid);
1883 	}
1884 
1885 	if (err)
1886 		goto err;
1887 
1888 	if (mucontext->devx_uid) {
1889 		params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
1890 		params->resp.tirn = qp->rss_qp.tirn;
1891 		if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
1892 		    MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
1893 			params->resp.tir_icm_addr =
1894 				MLX5_GET(create_tir_out, out, icm_address_31_0);
1895 			params->resp.tir_icm_addr |=
1896 				(u64)MLX5_GET(create_tir_out, out,
1897 					      icm_address_39_32)
1898 				<< 32;
1899 			params->resp.tir_icm_addr |=
1900 				(u64)MLX5_GET(create_tir_out, out,
1901 					      icm_address_63_40)
1902 				<< 40;
1903 			params->resp.comp_mask |=
1904 				MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
1905 		}
1906 	}
1907 
1908 	kvfree(in);
1909 	/* qpn is reserved for that QP */
1910 	qp->trans_qp.base.mqp.qpn = 0;
1911 	qp->is_rss = true;
1912 	return 0;
1913 
1914 err:
1915 	kvfree(in);
1916 	return err;
1917 }
1918 
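/*
 * Choose the requester-side CQE scatter size (cs_req) from the send CQ's
 * CQE size, when scatter-to-CQE is allowed for this QP.
 */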
1919 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
1920 					 struct mlx5_ib_qp *qp,
1921 					 struct ib_qp_init_attr *init_attr,
1922 					 void *qpc)
1923 {
1924 	int scqe_sz;
1925 	bool allow_scat_cqe = false;
1926 
1927 	allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
1928 
1929 	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
1930 		return;
1931 
1932 	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
1933 	if (scqe_sz == 128) {
1934 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
1935 		return;
1936 	}
1937 
1938 	if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
1939 	    MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
1940 		MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
1941 }
1942 
1943 static int atomic_size_to_mode(int size_mask)
1944 {
1945 	/* driver does not support atomic_size > 256B
1946 	 * and does not know how to translate bigger sizes
1947 	 */
1948 	int supported_size_mask = size_mask & 0x1ff;
1949 	int log_max_size;
1950 
1951 	if (!supported_size_mask)
1952 		return -EOPNOTSUPP;
1953 
1954 	log_max_size = __fls(supported_size_mask);
1955 
1956 	if (log_max_size > 3)
1957 		return log_max_size;
1958 
1959 	return MLX5_ATOMIC_MODE_8B;
1960 }
1961 
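/*
 * Pick the atomic mode for a QP: prefer the mode derived from the
 * device's extended-atomics size mask, and fall back to IB-spec compliant
 * atomics when only basic compare-swap and fetch-add are supported.
 */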
1962 static int get_atomic_mode(struct mlx5_ib_dev *dev,
1963 			   struct mlx5_ib_qp *qp)
1964 {
1965 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
1966 	u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
1967 	int atomic_mode = -EOPNOTSUPP;
1968 	int atomic_size_mask;
1969 
1970 	if (!atomic)
1971 		return -EOPNOTSUPP;
1972 
1973 	if (qp->type == MLX5_IB_QPT_DCT)
1974 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
1975 	else
1976 		atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
1977 
1978 	if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
1979 	    (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
1980 		atomic_mode = atomic_size_to_mode(atomic_size_mask);
1981 
1982 	if (atomic_mode <= 0 &&
1983 	    (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
1984 	     atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
1985 		atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
1986 
1987 	/* OOO DP QPs do not support larger than 8-Bytes atomic operations */
1988 	/* OOO DP QPs do not support atomic operations larger than 8 bytes */
1989 		atomic_mode = MLX5_ATOMIC_MODE_8B;
1990 
1991 	return atomic_mode;
1992 }
1993 
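/*
 * Create an XRC target QP. It has no SQ of its own and is wired to the
 * device's default CQ and SRQ resources.
 */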
1994 static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1995 			     struct mlx5_create_qp_params *params)
1996 {
1997 	struct ib_qp_init_attr *attr = params->attr;
1998 	u32 uidx = params->uidx;
1999 	struct mlx5_ib_resources *devr = &dev->devr;
2000 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
2001 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2002 	struct mlx5_core_dev *mdev = dev->mdev;
2003 	struct mlx5_ib_qp_base *base;
2004 	unsigned long flags;
2005 	void *qpc;
2006 	u32 *in;
2007 	int err;
2008 
2009 	if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2010 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2011 
2012 	in = kvzalloc(inlen, GFP_KERNEL);
2013 	if (!in)
2014 		return -ENOMEM;
2015 
2016 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2017 
2018 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
2019 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2020 	MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
2021 
2022 	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2023 		MLX5_SET(qpc, qpc, block_lb_mc, 1);
2024 	if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2025 		MLX5_SET(qpc, qpc, cd_master, 1);
2026 	if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2027 		MLX5_SET(qpc, qpc, cd_slave_send, 1);
2028 	if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
2029 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
2030 
2031 	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
2032 	MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
2033 	MLX5_SET(qpc, qpc, no_sq, 1);
2034 	MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2035 	MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
2036 	MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2037 	MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
2038 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2039 
2040 	/* 0xffffff means we ask to work with cqe version 0 */
2041 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2042 		MLX5_SET(qpc, qpc, user_index, uidx);
2043 
2044 	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2045 		MLX5_SET(qpc, qpc, end_padding_mode,
2046 			 MLX5_WQ_END_PAD_MODE_ALIGN);
2047 		/* Special case to clean flag */
2048 		/* Special case to clear the flag */
2049 	}
2050 
2051 	base = &qp->trans_qp.base;
2052 	err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2053 	kvfree(in);
2054 	if (err)
2055 		return err;
2056 
2057 	base->container_mibqp = qp;
2058 	base->mqp.event = mlx5_ib_qp_event;
2059 	if (MLX5_CAP_GEN(mdev, ece_support))
2060 		params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
2061 
2062 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2063 	list_add_tail(&qp->qps_list, &dev->qp_list);
2064 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2065 
2066 	qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
2067 	return 0;
2068 }
2069 
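/* Create a user-space DCI (DC initiator) QP from the user command. */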
2070 static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2071 		      struct mlx5_ib_qp *qp,
2072 		      struct mlx5_create_qp_params *params)
2073 {
2074 	struct ib_qp_init_attr *init_attr = params->attr;
2075 	struct mlx5_ib_create_qp *ucmd = params->ucmd;
2076 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
2077 	struct ib_udata *udata = params->udata;
2078 	u32 uidx = params->uidx;
2079 	struct mlx5_ib_resources *devr = &dev->devr;
2080 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2081 	struct mlx5_core_dev *mdev = dev->mdev;
2082 	struct mlx5_ib_cq *send_cq;
2083 	struct mlx5_ib_cq *recv_cq;
2084 	unsigned long flags;
2085 	struct mlx5_ib_qp_base *base;
2086 	int ts_format;
2087 	int mlx5_st;
2088 	void *qpc;
2089 	u32 *in;
2090 	int err;
2091 
2092 	spin_lock_init(&qp->sq.lock);
2093 	spin_lock_init(&qp->rq.lock);
2094 
2095 	mlx5_st = to_mlx5_st(qp->type);
2096 	if (mlx5_st < 0)
2097 		return -EINVAL;
2098 
2099 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2100 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2101 
2102 	base = &qp->trans_qp.base;
2103 
2104 	qp->has_rq = qp_has_rq(init_attr);
2105 	err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2106 	if (err) {
2107 		mlx5_ib_dbg(dev, "err %d\n", err);
2108 		return err;
2109 	}
2110 
2111 	if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2112 	    ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2113 		return -EINVAL;
2114 
2115 	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2116 		return -EINVAL;
2117 
2118 	ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
2119 				     to_mcq(init_attr->recv_cq));
2120 
2121 	if (ts_format < 0)
2122 		return ts_format;
2123 
2124 	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
2125 			      &inlen, base, ucmd);
2126 	if (err)
2127 		return err;
2128 
2129 	if (MLX5_CAP_GEN(mdev, ece_support))
2130 		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
2131 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2132 
2133 	MLX5_SET(qpc, qpc, st, mlx5_st);
2134 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2135 	MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
2136 
2137 	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
2138 		MLX5_SET(qpc, qpc, wq_signature, 1);
2139 
2140 	if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2141 		MLX5_SET(qpc, qpc, cd_master, 1);
2142 	if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2143 		MLX5_SET(qpc, qpc, cd_slave_send, 1);
2144 	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
2145 		configure_requester_scat_cqe(dev, qp, init_attr, qpc);
2146 
2147 	if (qp->rq.wqe_cnt) {
2148 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2149 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2150 	}
2151 
2152 	if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
2153 		MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
2154 			 ucmd->dci_streams.log_num_concurent);
2155 		MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
2156 			 ucmd->dci_streams.log_num_errored);
2157 	}
2158 
2159 	MLX5_SET(qpc, qpc, ts_format, ts_format);
2160 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2161 
2162 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2163 
2164 	/* Set default resources */
2165 	if (init_attr->srq) {
2166 		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
2167 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2168 			 to_msrq(init_attr->srq)->msrq.srqn);
2169 	} else {
2170 		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
2171 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2172 			 to_msrq(devr->s1)->msrq.srqn);
2173 	}
2174 
2175 	if (init_attr->send_cq)
2176 		MLX5_SET(qpc, qpc, cqn_snd,
2177 			 to_mcq(init_attr->send_cq)->mcq.cqn);
2178 
2179 	if (init_attr->recv_cq)
2180 		MLX5_SET(qpc, qpc, cqn_rcv,
2181 			 to_mcq(init_attr->recv_cq)->mcq.cqn);
2182 
2183 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2184 
2185 	/* 0xffffff means we ask to work with cqe version 0 */
2186 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2187 		MLX5_SET(qpc, qpc, user_index, uidx);
2188 
2189 	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
2190 		MLX5_SET(qpc, qpc, end_padding_mode,
2191 			 MLX5_WQ_END_PAD_MODE_ALIGN);
2192 		/* Special case to clear the flag */
2193 		qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2194 	}
2195 
2196 	err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2197 
2198 	kvfree(in);
2199 	if (err)
2200 		goto err_create;
2201 
2202 	base->container_mibqp = qp;
2203 	base->mqp.event = mlx5_ib_qp_event;
2204 	if (MLX5_CAP_GEN(mdev, ece_support))
2205 		params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
2206 
2207 	get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
2208 		&send_cq, &recv_cq);
2209 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2210 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2211 	/* Maintain device to QPs access, needed for further handling via reset
2212 	 * flow
2213 	 */
2214 	list_add_tail(&qp->qps_list, &dev->qp_list);
2215 	/* Maintain CQ to QPs access, needed for further handling via reset flow
2216 	 */
2217 	if (send_cq)
2218 		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2219 	if (recv_cq)
2220 		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2221 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2222 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2223 
2224 	return 0;
2225 
2226 err_create:
2227 	destroy_qp(dev, qp, base, udata);
2228 	return err;
2229 }
2230 
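/*
 * Create a user-space QP: size the work queues from the user command,
 * build the QPC and issue either a regular CREATE_QP or, for RAW packet
 * QPs, the RQ/SQ/TIR/TIS creation flow.
 */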
2231 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2232 			  struct mlx5_ib_qp *qp,
2233 			  struct mlx5_create_qp_params *params)
2234 {
2235 	struct ib_qp_init_attr *init_attr = params->attr;
2236 	struct mlx5_ib_create_qp *ucmd = params->ucmd;
2237 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
2238 	struct ib_udata *udata = params->udata;
2239 	u32 uidx = params->uidx;
2240 	struct mlx5_ib_resources *devr = &dev->devr;
2241 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2242 	struct mlx5_core_dev *mdev = dev->mdev;
2243 	struct mlx5_ib_cq *send_cq;
2244 	struct mlx5_ib_cq *recv_cq;
2245 	unsigned long flags;
2246 	struct mlx5_ib_qp_base *base;
2247 	int ts_format;
2248 	int mlx5_st;
2249 	void *qpc;
2250 	u32 *in;
2251 	int err;
2252 
2253 	spin_lock_init(&qp->sq.lock);
2254 	spin_lock_init(&qp->rq.lock);
2255 
2256 	mlx5_st = to_mlx5_st(qp->type);
2257 	if (mlx5_st < 0)
2258 		return -EINVAL;
2259 
2260 	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2261 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2262 
2263 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
2264 		qp->underlay_qpn = init_attr->source_qpn;
2265 
2266 	base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2267 		qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
2268 	       &qp->raw_packet_qp.rq.base :
2269 	       &qp->trans_qp.base;
2270 
2271 	qp->has_rq = qp_has_rq(init_attr);
2272 	err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
2273 	if (err) {
2274 		mlx5_ib_dbg(dev, "err %d\n", err);
2275 		return err;
2276 	}
2277 
2278 	if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
2279 	    ucmd->rq_wqe_count != qp->rq.wqe_cnt)
2280 		return -EINVAL;
2281 
2282 	if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
2283 		return -EINVAL;
2284 
2285 	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
2286 		ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
2287 					     to_mcq(init_attr->recv_cq));
2288 		if (ts_format < 0)
2289 			return ts_format;
2290 	}
2291 
2292 	err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
2293 			      &inlen, base, ucmd);
2294 	if (err)
2295 		return err;
2296 
2297 	if (is_sqp(init_attr->qp_type))
2298 		qp->port = init_attr->port_num;
2299 
2300 	if (MLX5_CAP_GEN(mdev, ece_support))
2301 		MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
2302 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2303 
2304 	MLX5_SET(qpc, qpc, st, mlx5_st);
2305 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2306 	MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
2307 
2308 	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
2309 		MLX5_SET(qpc, qpc, wq_signature, 1);
2310 
2311 	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2312 		MLX5_SET(qpc, qpc, block_lb_mc, 1);
2313 
2314 	if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
2315 		MLX5_SET(qpc, qpc, cd_master, 1);
2316 	if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
2317 		MLX5_SET(qpc, qpc, cd_slave_send, 1);
2318 	if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
2319 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
2320 	if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
2321 		MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
2322 	if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2323 	    (init_attr->qp_type == IB_QPT_RC ||
2324 	     init_attr->qp_type == IB_QPT_UC)) {
2325 		int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
2326 
2327 		MLX5_SET(qpc, qpc, cs_res,
2328 			 rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
2329 					  MLX5_RES_SCAT_DATA32_CQE);
2330 	}
2331 	if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
2332 	    (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
2333 		configure_requester_scat_cqe(dev, qp, init_attr, qpc);
2334 
2335 	if (qp->rq.wqe_cnt) {
2336 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2337 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2338 	}
2339 
2340 	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
2341 		MLX5_SET(qpc, qpc, ts_format, ts_format);
2342 
2343 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
2344 
2345 	if (qp->sq.wqe_cnt) {
2346 		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2347 	} else {
2348 		MLX5_SET(qpc, qpc, no_sq, 1);
2349 		if (init_attr->srq &&
2350 		    init_attr->srq->srq_type == IB_SRQT_TM)
2351 			MLX5_SET(qpc, qpc, offload_type,
2352 				 MLX5_QPC_OFFLOAD_TYPE_RNDV);
2353 	}
2354 
2355 	/* Set default resources */
2356 	switch (init_attr->qp_type) {
2357 	case IB_QPT_XRC_INI:
2358 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
2359 		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
2360 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
2361 		break;
2362 	default:
2363 		if (init_attr->srq) {
2364 			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
2365 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
2366 		} else {
2367 			MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
2368 			MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
2369 		}
2370 	}
2371 
2372 	if (init_attr->send_cq)
2373 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
2374 
2375 	if (init_attr->recv_cq)
2376 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
2377 
2378 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2379 
2380 	/* 0xffffff means we ask to work with cqe version 0 */
2381 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2382 		MLX5_SET(qpc, qpc, user_index, uidx);
2383 
2384 	if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
2385 	    init_attr->qp_type != IB_QPT_RAW_PACKET) {
2386 		MLX5_SET(qpc, qpc, end_padding_mode,
2387 			 MLX5_WQ_END_PAD_MODE_ALIGN);
2388 		/* Special case to clear the flag */
2389 		qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
2390 	}
2391 
2392 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
2393 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2394 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
2395 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
2396 		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
2397 					   &params->resp, init_attr);
2398 	} else
2399 		err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2400 
2401 	kvfree(in);
2402 	if (err)
2403 		goto err_create;
2404 
2405 	base->container_mibqp = qp;
2406 	base->mqp.event = mlx5_ib_qp_event;
2407 	if (MLX5_CAP_GEN(mdev, ece_support))
2408 		params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
2409 
2410 	get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
2411 		&send_cq, &recv_cq);
2412 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2413 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2414 	/* Maintain device to QPs access, needed for further handling via reset
2415 	 * flow
2416 	 */
2417 	list_add_tail(&qp->qps_list, &dev->qp_list);
2418 	/* Maintain CQ to QPs access, needed for further handling via reset flow
2419 	 */
2420 	if (send_cq)
2421 		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2422 	if (recv_cq)
2423 		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2424 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2425 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2426 
2427 	return 0;
2428 
2429 err_create:
2430 	destroy_qp(dev, qp, base, udata);
2431 	return err;
2432 }
2433 
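/* Create a QP for kernel consumers (no udata), e.g. the HW GSI and UMR QPs. */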
2434 static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2435 			    struct mlx5_ib_qp *qp,
2436 			    struct mlx5_create_qp_params *params)
2437 {
2438 	struct ib_qp_init_attr *attr = params->attr;
2439 	u32 uidx = params->uidx;
2440 	struct mlx5_ib_resources *devr = &dev->devr;
2441 	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
2442 	int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
2443 	struct mlx5_core_dev *mdev = dev->mdev;
2444 	struct mlx5_ib_cq *send_cq;
2445 	struct mlx5_ib_cq *recv_cq;
2446 	unsigned long flags;
2447 	struct mlx5_ib_qp_base *base;
2448 	int mlx5_st;
2449 	void *qpc;
2450 	u32 *in;
2451 	int err;
2452 
2453 	spin_lock_init(&qp->sq.lock);
2454 	spin_lock_init(&qp->rq.lock);
2455 
2456 	mlx5_st = to_mlx5_st(qp->type);
2457 	if (mlx5_st < 0)
2458 		return -EINVAL;
2459 
2460 	if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
2461 		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
2462 
2463 	base = &qp->trans_qp.base;
2464 
2465 	qp->has_rq = qp_has_rq(attr);
2466 	err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
2467 	if (err) {
2468 		mlx5_ib_dbg(dev, "err %d\n", err);
2469 		return err;
2470 	}
2471 
2472 	err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
2473 	if (err)
2474 		return err;
2475 
2476 	if (is_sqp(attr->qp_type))
2477 		qp->port = attr->port_num;
2478 
2479 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
2480 
2481 	MLX5_SET(qpc, qpc, st, mlx5_st);
2482 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
2483 
2484 	if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
2485 		MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
2486 	else
2487 		MLX5_SET(qpc, qpc, latency_sensitive, 1);
2488 
2489 
2490 	if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
2491 		MLX5_SET(qpc, qpc, block_lb_mc, 1);
2492 
2493 	if (qp->rq.wqe_cnt) {
2494 		MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
2495 		MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
2496 	}
2497 
2498 	MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
2499 
2500 	if (qp->sq.wqe_cnt)
2501 		MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
2502 	else
2503 		MLX5_SET(qpc, qpc, no_sq, 1);
2504 
2505 	if (attr->srq) {
2506 		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
2507 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2508 			 to_msrq(attr->srq)->msrq.srqn);
2509 	} else {
2510 		MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
2511 		MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
2512 			 to_msrq(devr->s1)->msrq.srqn);
2513 	}
2514 
2515 	if (attr->send_cq)
2516 		MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
2517 
2518 	if (attr->recv_cq)
2519 		MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
2520 
2521 	MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
2522 
2523 	/* 0xffffff means we ask to work with cqe version 0 */
2524 	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
2525 		MLX5_SET(qpc, qpc, user_index, uidx);
2526 
2527 	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB UD QP */
2528 	if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
2529 		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
2530 
2531 	if (qp->flags & IB_QP_CREATE_INTEGRITY_EN &&
2532 	    MLX5_CAP_GEN(mdev, go_back_n))
2533 		MLX5_SET(qpc, qpc, retry_mode, MLX5_QP_RM_GO_BACK_N);
2534 
2535 	err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
2536 	kvfree(in);
2537 	if (err)
2538 		goto err_create;
2539 
2540 	base->container_mibqp = qp;
2541 	base->mqp.event = mlx5_ib_qp_event;
2542 
2543 	get_cqs(qp->type, attr->send_cq, attr->recv_cq,
2544 		&send_cq, &recv_cq);
2545 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2546 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2547 	/* Maintain device to QPs access, needed for further handling via reset
2548 	 * flow
2549 	 */
2550 	list_add_tail(&qp->qps_list, &dev->qp_list);
2551 	/* Maintain CQ to QPs access, needed for further handling via reset flow
2552 	 */
2553 	if (send_cq)
2554 		list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
2555 	if (recv_cq)
2556 		list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
2557 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2558 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2559 
2560 	return 0;
2561 
2562 err_create:
2563 	destroy_qp(dev, qp, base, NULL);
2564 	return err;
2565 }
2566 
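/*
 * Lock the send and receive CQs in a consistent order (lowest CQN first)
 * to avoid ABBA deadlocks; also handles shared or missing CQs.
 */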
2567 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2568 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
2569 {
2570 	if (send_cq) {
2571 		if (recv_cq) {
2572 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2573 				spin_lock(&send_cq->lock);
2574 				spin_lock_nested(&recv_cq->lock,
2575 						 SINGLE_DEPTH_NESTING);
2576 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2577 				spin_lock(&send_cq->lock);
2578 				__acquire(&recv_cq->lock);
2579 			} else {
2580 				spin_lock(&recv_cq->lock);
2581 				spin_lock_nested(&send_cq->lock,
2582 						 SINGLE_DEPTH_NESTING);
2583 			}
2584 		} else {
2585 			spin_lock(&send_cq->lock);
2586 			__acquire(&recv_cq->lock);
2587 		}
2588 	} else if (recv_cq) {
2589 		spin_lock(&recv_cq->lock);
2590 		__acquire(&send_cq->lock);
2591 	} else {
2592 		__acquire(&send_cq->lock);
2593 		__acquire(&recv_cq->lock);
2594 	}
2595 }
2596 
2597 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
2598 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
2599 {
2600 	if (send_cq) {
2601 		if (recv_cq) {
2602 			if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
2603 				spin_unlock(&recv_cq->lock);
2604 				spin_unlock(&send_cq->lock);
2605 			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
2606 				__release(&recv_cq->lock);
2607 				spin_unlock(&send_cq->lock);
2608 			} else {
2609 				spin_unlock(&send_cq->lock);
2610 				spin_unlock(&recv_cq->lock);
2611 			}
2612 		} else {
2613 			__release(&recv_cq->lock);
2614 			spin_unlock(&send_cq->lock);
2615 		}
2616 	} else if (recv_cq) {
2617 		__release(&send_cq->lock);
2618 		spin_unlock(&recv_cq->lock);
2619 	} else {
2620 		__release(&recv_cq->lock);
2621 		__release(&send_cq->lock);
2622 	}
2623 }
2624 
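/* Resolve which send/receive CQs are actually used by a QP of this type. */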
2625 static void get_cqs(enum ib_qp_type qp_type,
2626 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
2627 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
2628 {
2629 	switch (qp_type) {
2630 	case IB_QPT_XRC_TGT:
2631 		*send_cq = NULL;
2632 		*recv_cq = NULL;
2633 		break;
2634 	case MLX5_IB_QPT_REG_UMR:
2635 	case IB_QPT_XRC_INI:
2636 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2637 		*recv_cq = NULL;
2638 		break;
2639 
2640 	case IB_QPT_SMI:
2641 	case MLX5_IB_QPT_HW_GSI:
2642 	case IB_QPT_RC:
2643 	case IB_QPT_UC:
2644 	case IB_QPT_UD:
2645 	case IB_QPT_RAW_PACKET:
2646 		*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
2647 		*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
2648 		break;
2649 	default:
2650 		*send_cq = NULL;
2651 		*recv_cq = NULL;
2652 		break;
2653 	}
2654 }
2655 
2656 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2657 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
2658 				u8 lag_tx_affinity);
2659 
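/*
 * Common QP teardown: move the QP to RESET if needed, unlink it from the
 * device and CQ lists, clean its CQEs (kernel QPs only) and destroy the
 * hardware object(s).
 */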
2660 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2661 			      struct ib_udata *udata)
2662 {
2663 	struct mlx5_ib_cq *send_cq, *recv_cq;
2664 	struct mlx5_ib_qp_base *base;
2665 	unsigned long flags;
2666 	int err;
2667 
2668 	if (qp->is_rss) {
2669 		destroy_rss_raw_qp_tir(dev, qp);
2670 		return;
2671 	}
2672 
2673 	base = (qp->type == IB_QPT_RAW_PACKET ||
2674 		qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
2675 		       &qp->raw_packet_qp.rq.base :
2676 		       &qp->trans_qp.base;
2677 
2678 	if (qp->state != IB_QPS_RESET) {
2679 		if (qp->type != IB_QPT_RAW_PACKET &&
2680 		    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
2681 			err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
2682 						  NULL, &base->mqp, NULL);
2683 		} else {
2684 			struct mlx5_modify_raw_qp_param raw_qp_param = {
2685 				.operation = MLX5_CMD_OP_2RST_QP
2686 			};
2687 
2688 			err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
2689 		}
2690 		if (err)
2691 			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
2692 				     base->mqp.qpn);
2693 	}
2694 
2695 	get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
2696 		&recv_cq);
2697 
2698 	spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
2699 	mlx5_ib_lock_cqs(send_cq, recv_cq);
2700 	/* del from lists under both locks above to protect reset flow paths */
2701 	list_del(&qp->qps_list);
2702 	if (send_cq)
2703 		list_del(&qp->cq_send_list);
2704 
2705 	if (recv_cq)
2706 		list_del(&qp->cq_recv_list);
2707 
2708 	if (!udata) {
2709 		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
2710 				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
2711 		if (send_cq != recv_cq)
2712 			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
2713 					   NULL);
2714 	}
2715 	mlx5_ib_unlock_cqs(send_cq, recv_cq);
2716 	spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
2717 
2718 	if (qp->type == IB_QPT_RAW_PACKET ||
2719 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
2720 		destroy_raw_packet_qp(dev, qp);
2721 	} else {
2722 		err = mlx5_core_destroy_qp(dev, &base->mqp);
2723 		if (err)
2724 			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
2725 				     base->mqp.qpn);
2726 	}
2727 
2728 	destroy_qp(dev, qp, base, udata);
2729 }
2730 
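/*
 * Prepare a DCT QP: build the create_dct command and leave the QP in
 * RESET; the DCT itself only exists in hardware once the QP has been
 * moved to RTR (see mlx5_ib_destroy_dct()).
 */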
2731 static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2732 		      struct mlx5_ib_qp *qp,
2733 		      struct mlx5_create_qp_params *params)
2734 {
2735 	struct ib_qp_init_attr *attr = params->attr;
2736 	struct mlx5_ib_create_qp *ucmd = params->ucmd;
2737 	u32 uidx = params->uidx;
2738 	void *dctc;
2739 
2740 	if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
2741 		return -EOPNOTSUPP;
2742 
2743 	qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
2744 	if (!qp->dct.in)
2745 		return -ENOMEM;
2746 
2747 	MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
2748 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
2749 	MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
2750 	MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
2751 	MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
2752 	MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
2753 	MLX5_SET(dctc, dctc, user_index, uidx);
2754 	if (MLX5_CAP_GEN(dev->mdev, ece_support))
2755 		MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
2756 
2757 	if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
2758 		int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
2759 
2760 		if (rcqe_sz == 128)
2761 			MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
2762 	}
2763 
2764 	qp->state = IB_QPS_RESET;
2765 	return 0;
2766 }
2767 
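/* Validate that the requested QP type is supported by this device. */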
2768 static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
2769 			 enum ib_qp_type *type)
2770 {
2771 	if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
2772 		goto out;
2773 
2774 	switch (attr->qp_type) {
2775 	case IB_QPT_XRC_TGT:
2776 	case IB_QPT_XRC_INI:
2777 		if (!MLX5_CAP_GEN(dev->mdev, xrc))
2778 			goto out;
2779 		fallthrough;
2780 	case IB_QPT_RC:
2781 	case IB_QPT_UC:
2782 	case IB_QPT_SMI:
2783 	case MLX5_IB_QPT_HW_GSI:
2784 	case IB_QPT_DRIVER:
2785 	case IB_QPT_GSI:
2786 	case IB_QPT_RAW_PACKET:
2787 	case IB_QPT_UD:
2788 	case MLX5_IB_QPT_REG_UMR:
2789 		break;
2790 	default:
2791 		goto out;
2792 	}
2793 
2794 	*type = attr->qp_type;
2795 	return 0;
2796 
2797 out:
2798 	mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
2799 	return -EOPNOTSUPP;
2800 }
2801 
2802 static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
2803 			    struct ib_qp_init_attr *attr,
2804 			    struct ib_udata *udata)
2805 {
2806 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
2807 		udata, struct mlx5_ib_ucontext, ibucontext);
2808 
2809 	if (!udata) {
2810 		/* Kernel create_qp callers */
2811 		if (attr->rwq_ind_tbl)
2812 			return -EOPNOTSUPP;
2813 
2814 		switch (attr->qp_type) {
2815 		case IB_QPT_RAW_PACKET:
2816 		case IB_QPT_DRIVER:
2817 			return -EOPNOTSUPP;
2818 		default:
2819 			return 0;
2820 		}
2821 	}
2822 
2823 	/* Userspace create_qp callers */
2824 	if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
2825 		mlx5_ib_dbg(dev,
2826 			"Raw Packet QP is only supported for CQE version > 0\n");
2827 		return -EINVAL;
2828 	}
2829 
2830 	if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
2831 		mlx5_ib_dbg(dev,
2832 			    "Wrong QP type %d for the RWQ indirect table\n",
2833 			    attr->qp_type);
2834 		return -EINVAL;
2835 	}
2836 
2837 	/*
2838 	 * We don't need to see this warning; it means that kernel code is
2839 	 * missing an ib_pd. Placed here to catch developers' mistakes.
2840 	 */
2841 	WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
2842 		  "There is a missing PD pointer assignment\n");
2843 	return 0;
2844 }
2845 
2846 static bool get_dp_ooo_cap(struct mlx5_core_dev *mdev, enum ib_qp_type qp_type)
2847 {
2848 	if (!MLX5_CAP_GEN_2(mdev, dp_ordering_force))
2849 		return false;
2850 
2851 	switch (qp_type) {
2852 	case IB_QPT_RC:
2853 		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_rc);
2854 	case IB_QPT_XRC_INI:
2855 	case IB_QPT_XRC_TGT:
2856 		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_xrc);
2857 	case IB_QPT_UC:
2858 		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_uc);
2859 	case IB_QPT_UD:
2860 		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_ud);
2861 	case MLX5_IB_QPT_DCI:
2862 	case MLX5_IB_QPT_DCT:
2863 		return MLX5_CAP_GEN(mdev, dp_ordering_ooo_all_dc);
2864 	default:
2865 		return false;
2866 	}
2867 }
2868 
2869 static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
2870 				bool cond, struct mlx5_ib_qp *qp)
2871 {
2872 	if (!(*flags & flag))
2873 		return;
2874 
2875 	if (cond) {
2876 		qp->flags_en |= flag;
2877 		*flags &= ~flag;
2878 		return;
2879 	}
2880 
2881 	switch (flag) {
2882 	case MLX5_QP_FLAG_SCATTER_CQE:
2883 	case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
2884 		/*
2885 		 * We don't return an error if these flags were provided
2886 		 * but mlx5 doesn't have the right capability.
2887 		 */
2888 		*flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
2889 			    MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
2890 		return;
2891 	default:
2892 		break;
2893 	}
2894 	mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
2895 }
2896 
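/*
 * Parse the mlx5-specific (vendor) create flags from the user command,
 * accepting only those the device actually supports.
 */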
2897 static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2898 				void *ucmd, struct ib_qp_init_attr *attr)
2899 {
2900 	struct mlx5_core_dev *mdev = dev->mdev;
2901 	bool cond;
2902 	int flags;
2903 
2904 	if (attr->rwq_ind_tbl)
2905 		flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
2906 	else
2907 		flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
2908 
2909 	switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
2910 	case MLX5_QP_FLAG_TYPE_DCI:
2911 		qp->type = MLX5_IB_QPT_DCI;
2912 		break;
2913 	case MLX5_QP_FLAG_TYPE_DCT:
2914 		qp->type = MLX5_IB_QPT_DCT;
2915 		break;
2916 	default:
2917 		if (qp->type != IB_QPT_DRIVER)
2918 			break;
2919 		/*
2920 		 * It is IB_QPT_DRIVER but no subtype or a wrong
2921 		 * subtype was provided.
2922 		 */
2923 		return -EINVAL;
2924 	}
2925 
2926 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
2927 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
2928 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
2929 			    MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
2930 			    qp);
2931 
2932 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
2933 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
2934 			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2935 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
2936 			    MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
2937 
2938 	if (qp->type == IB_QPT_RAW_PACKET) {
2939 		cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
2940 		       MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
2941 		       MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
2942 		process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
2943 				    cond, qp);
2944 		process_vendor_flag(dev, &flags,
2945 				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
2946 				    qp);
2947 		process_vendor_flag(dev, &flags,
2948 				    MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
2949 				    qp);
2950 	}
2951 
2952 	if (qp->type == IB_QPT_RC)
2953 		process_vendor_flag(dev, &flags,
2954 				    MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
2955 				    MLX5_CAP_GEN(mdev, qp_packet_based), qp);
2956 
2957 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
2958 	process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
2959 
2960 	cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
2961 				MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
2962 				MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
2963 	if (attr->rwq_ind_tbl && cond) {
2964 		mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
2965 			    cond);
2966 		return -EINVAL;
2967 	}
2968 
2969 	if (flags)
2970 		mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
2971 
2972 	return (flags) ? -EINVAL : 0;
2973 }
2974 
2975 static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
2976 				bool cond, struct mlx5_ib_qp *qp)
2977 {
2978 	if (!(*flags & flag))
2979 		return;
2980 
2981 	if (cond) {
2982 		qp->flags |= flag;
2983 		*flags &= ~flag;
2984 		return;
2985 	}
2986 
2987 	mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
2988 }
2989 
2990 static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
2991 				struct ib_qp_init_attr *attr)
2992 {
2993 	enum ib_qp_type qp_type = qp->type;
2994 	struct mlx5_core_dev *mdev = dev->mdev;
2995 	int create_flags = attr->create_flags;
2996 	bool cond;
2997 
2998 	if (qp_type == MLX5_IB_QPT_DCT)
2999 		return (create_flags) ? -EINVAL : 0;
3000 
3001 	if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
3002 		return (create_flags) ? -EINVAL : 0;
3003 
3004 	process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
3005 			    mlx5_get_flow_namespace(dev->mdev,
3006 						    MLX5_FLOW_NAMESPACE_BYPASS),
3007 			    qp);
3008 	process_create_flag(dev, &create_flags,
3009 			    IB_QP_CREATE_INTEGRITY_EN,
3010 			    MLX5_CAP_GEN(mdev, sho), qp);
3011 	process_create_flag(dev, &create_flags,
3012 			    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
3013 			    MLX5_CAP_GEN(mdev, block_lb_mc), qp);
3014 	process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
3015 			    MLX5_CAP_GEN(mdev, cd), qp);
3016 	process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
3017 			    MLX5_CAP_GEN(mdev, cd), qp);
3018 	process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
3019 			    MLX5_CAP_GEN(mdev, cd), qp);
3020 
3021 	if (qp_type == IB_QPT_UD) {
3022 		process_create_flag(dev, &create_flags,
3023 				    IB_QP_CREATE_IPOIB_UD_LSO,
3024 				    MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
3025 				    qp);
3026 		cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
3027 		process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
3028 				    cond, qp);
3029 	}
3030 
3031 	if (qp_type == IB_QPT_RAW_PACKET) {
3032 		cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
3033 		       MLX5_CAP_ETH(mdev, scatter_fcs);
3034 		process_create_flag(dev, &create_flags,
3035 				    IB_QP_CREATE_SCATTER_FCS, cond, qp);
3036 
3037 		cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
3038 		       MLX5_CAP_ETH(mdev, vlan_cap);
3039 		process_create_flag(dev, &create_flags,
3040 				    IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
3041 	}
3042 
3043 	process_create_flag(dev, &create_flags,
3044 			    IB_QP_CREATE_PCI_WRITE_END_PADDING,
3045 			    MLX5_CAP_GEN(mdev, end_pad), qp);
3046 
3047 	process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
3048 			    true, qp);
3049 
3050 	if (create_flags) {
3051 		mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
3052 			    create_flags);
3053 		return -EOPNOTSUPP;
3054 	}
3055 	return 0;
3056 }
3057 
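/*
 * Work out how many bytes of the user command and of the response to use,
 * accommodating older rdma-core versions with smaller structures.
 */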
3058 static int process_udata_size(struct mlx5_ib_dev *dev,
3059 			      struct mlx5_create_qp_params *params)
3060 {
3061 	size_t ucmd = sizeof(struct mlx5_ib_create_qp);
3062 	struct ib_udata *udata = params->udata;
3063 	size_t outlen = udata->outlen;
3064 	size_t inlen = udata->inlen;
3065 
3066 	params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
3067 	params->ucmd_size = ucmd;
3068 	if (!params->is_rss_raw) {
3069 		/* User has old rdma-core, which doesn't support ECE */
3070 		size_t min_inlen =
3071 			offsetof(struct mlx5_ib_create_qp, ece_options);
3072 
3073 		/*
3074 		 * We will check in check_ucmd_data() that the user
3075 		 * cleared everything after inlen.
3076 		 */
3077 		params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
3078 		goto out;
3079 	}
3080 
3081 	/* RSS RAW QP */
3082 	if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
3083 		return -EINVAL;
3084 
3085 	if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
3086 		return -EINVAL;
3087 
3088 	ucmd = sizeof(struct mlx5_ib_create_qp_rss);
3089 	params->ucmd_size = ucmd;
3090 	if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
3091 		return -EINVAL;
3092 
3093 	params->inlen = min(ucmd, inlen);
3094 out:
3095 	if (!params->inlen)
3096 		mlx5_ib_dbg(dev, "udata is too small\n");
3097 
3098 	return (params->inlen) ? 0 : -EINVAL;
3099 }
3100 
3101 static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
3102 		     struct mlx5_ib_qp *qp,
3103 		     struct mlx5_create_qp_params *params)
3104 {
3105 	int err;
3106 
3107 	if (params->is_rss_raw) {
3108 		err = create_rss_raw_qp_tir(dev, pd, qp, params);
3109 		goto out;
3110 	}
3111 
3112 	switch (qp->type) {
3113 	case MLX5_IB_QPT_DCT:
3114 		err = create_dct(dev, pd, qp, params);
3115 		break;
3116 	case MLX5_IB_QPT_DCI:
3117 		err = create_dci(dev, pd, qp, params);
3118 		break;
3119 	case IB_QPT_XRC_TGT:
3120 		err = create_xrc_tgt_qp(dev, qp, params);
3121 		break;
3122 	case IB_QPT_GSI:
3123 		err = mlx5_ib_create_gsi(pd, qp, params->attr);
3124 		break;
3125 	case MLX5_IB_QPT_HW_GSI:
3126 		rdma_restrack_no_track(&qp->ibqp.res);
3127 		fallthrough;
3128 	case MLX5_IB_QPT_REG_UMR:
3129 	default:
3130 		if (params->udata)
3131 			err = create_user_qp(dev, pd, qp, params);
3132 		else
3133 			err = create_kernel_qp(dev, pd, qp, params);
3134 	}
3135 
3136 out:
3137 	if (err) {
3138 		mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
3139 		return err;
3140 	}
3141 
3142 	if (is_qp0(qp->type))
3143 		qp->ibqp.qp_num = 0;
3144 	else if (is_qp1(qp->type))
3145 		qp->ibqp.qp_num = 1;
3146 	else
3147 		qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
3148 
3149 	mlx5_ib_dbg(dev,
3150 		"QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
3151 		qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
3152 		params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
3153 					-1,
3154 		params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
3155 					-1,
3156 		params->resp.ece_options);
3157 
3158 	return 0;
3159 }
3160 
3161 static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3162 			 struct ib_qp_init_attr *attr)
3163 {
3164 	int ret = 0;
3165 
3166 	switch (qp->type) {
3167 	case MLX5_IB_QPT_DCT:
3168 		ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
3169 		break;
3170 	case MLX5_IB_QPT_DCI:
3171 		ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
3172 			      -EINVAL :
3173 			      0;
3174 		break;
3175 	case IB_QPT_RAW_PACKET:
3176 		ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
3177 		break;
3178 	default:
3179 		break;
3180 	}
3181 
3182 	if (ret)
3183 		mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
3184 
3185 	return ret;
3186 }
3187 
3188 static int get_qp_uidx(struct mlx5_ib_qp *qp,
3189 		       struct mlx5_create_qp_params *params)
3190 {
3191 	struct mlx5_ib_create_qp *ucmd = params->ucmd;
3192 	struct ib_udata *udata = params->udata;
3193 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
3194 		udata, struct mlx5_ib_ucontext, ibucontext);
3195 
3196 	if (params->is_rss_raw)
3197 		return 0;
3198 
3199 	return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
3200 }
3201 
3202 static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
3203 {
3204 	struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
3205 
3206 	if (mqp->state == IB_QPS_RTR) {
3207 		int err;
3208 
3209 		err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
3210 		if (err) {
3211 			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
3212 			return err;
3213 		}
3214 	}
3215 
3216 	kfree(mqp->dct.in);
3217 	return 0;
3218 }
3219 
3220 static int check_ucmd_data(struct mlx5_ib_dev *dev,
3221 			   struct mlx5_create_qp_params *params)
3222 {
3223 	struct ib_udata *udata = params->udata;
3224 	size_t size, last;
3225 	int ret;
3226 
3227 	if (params->is_rss_raw)
3228 		/*
3229 		 * These QPs don't have "reserved" field in their
3230 		 * create_qp input struct, so their data is always valid.
3231 		 */
3232 		last = sizeof(struct mlx5_ib_create_qp_rss);
3233 	else
3234 		last = offsetof(struct mlx5_ib_create_qp, reserved);
3235 
3236 	if (udata->inlen <= last)
3237 		return 0;
3238 
3239 	/*
3240 	 * The user provides different create_qp structures based on the
3241 	 * flow, and we need to know whether the memory beyond the end of
3242 	 * our create_qp struct was cleared.
3243 	 */
3244 	size = udata->inlen - last;
3245 	ret = ib_is_udata_cleared(params->udata, last, size);
3246 	if (!ret)
3247 		mlx5_ib_dbg(
3248 			dev,
3249 			"udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
3250 			udata->inlen, params->ucmd_size, last, size);
3251 	return ret ? 0 : -EINVAL;
3252 }
3253 
3254 int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
3255 		      struct ib_udata *udata)
3256 {
3257 	struct mlx5_create_qp_params params = {};
3258 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3259 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
3260 	struct ib_pd *pd = ibqp->pd;
3261 	enum ib_qp_type type;
3262 	int err;
3263 
3264 	err = mlx5_ib_dev_res_srq_init(dev);
3265 	if (err)
3266 		return err;
3267 
3268 	err = check_qp_type(dev, attr, &type);
3269 	if (err)
3270 		return err;
3271 
3272 	err = check_valid_flow(dev, pd, attr, udata);
3273 	if (err)
3274 		return err;
3275 
3276 	params.udata = udata;
3277 	params.uidx = MLX5_IB_DEFAULT_UIDX;
3278 	params.attr = attr;
3279 	params.is_rss_raw = !!attr->rwq_ind_tbl;
3280 
3281 	if (udata) {
3282 		err = process_udata_size(dev, &params);
3283 		if (err)
3284 			return err;
3285 
3286 		err = check_ucmd_data(dev, &params);
3287 		if (err)
3288 			return err;
3289 
3290 		params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
3291 		if (!params.ucmd)
3292 			return -ENOMEM;
3293 
3294 		err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
3295 		if (err)
3296 			goto free_ucmd;
3297 	}
3298 
3299 	mutex_init(&qp->mutex);
3300 	qp->type = type;
3301 	if (udata) {
3302 		err = process_vendor_flags(dev, qp, params.ucmd, attr);
3303 		if (err)
3304 			goto free_ucmd;
3305 
3306 		err = get_qp_uidx(qp, &params);
3307 		if (err)
3308 			goto free_ucmd;
3309 	}
3310 	err = process_create_flags(dev, qp, attr);
3311 	if (err)
3312 		goto free_ucmd;
3313 
3314 	err = check_qp_attr(dev, qp, attr);
3315 	if (err)
3316 		goto free_ucmd;
3317 
3318 	err = create_qp(dev, pd, qp, &params);
3319 	if (err)
3320 		goto free_ucmd;
3321 
3322 	kfree(params.ucmd);
3323 	params.ucmd = NULL;
3324 
3325 	if (udata)
3326 		/*
3327 		 * It is safe to copy response for all user create QP flows,
3328 		 * including MLX5_IB_QPT_DCT, which doesn't need it.
3329 		 * In that case, resp will be filled with zeros.
3330 		 */
3331 		err = ib_copy_to_udata(udata, &params.resp, params.outlen);
3332 	if (err)
3333 		goto destroy_qp;
3334 
3335 	return 0;
3336 
3337 destroy_qp:
3338 	switch (qp->type) {
3339 	case MLX5_IB_QPT_DCT:
3340 		mlx5_ib_destroy_dct(qp);
3341 		break;
3342 	case IB_QPT_GSI:
3343 		mlx5_ib_destroy_gsi(qp);
3344 		break;
3345 	default:
3346 		destroy_qp_common(dev, qp, udata);
3347 	}
3348 
3349 free_ucmd:
3350 	kfree(params.ucmd);
3351 	return err;
3352 }
3353 
3354 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
3355 {
3356 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
3357 	struct mlx5_ib_qp *mqp = to_mqp(qp);
3358 
3359 	if (mqp->type == IB_QPT_GSI)
3360 		return mlx5_ib_destroy_gsi(mqp);
3361 
3362 	if (mqp->type == MLX5_IB_QPT_DCT)
3363 		return mlx5_ib_destroy_dct(mqp);
3364 
3365 	destroy_qp_common(dev, mqp, udata);
3366 	return 0;
3367 }
3368 
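/* Program the remote access (RRE/RWE/RAE) and atomic mode bits of the QPC. */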
3369 static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
3370 				const struct ib_qp_attr *attr, int attr_mask,
3371 				void *qpc)
3372 {
3373 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
3374 	u8 dest_rd_atomic;
3375 	u32 access_flags;
3376 
3377 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3378 		dest_rd_atomic = attr->max_dest_rd_atomic;
3379 	else
3380 		dest_rd_atomic = qp->trans_qp.resp_depth;
3381 
3382 	if (attr_mask & IB_QP_ACCESS_FLAGS)
3383 		access_flags = attr->qp_access_flags;
3384 	else
3385 		access_flags = qp->trans_qp.atomic_rd_en;
3386 
3387 	if (!dest_rd_atomic)
3388 		access_flags &= IB_ACCESS_REMOTE_WRITE;
3389 
3390 	MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
3391 
3392 	if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
3393 		int atomic_mode;
3394 
3395 		atomic_mode = get_atomic_mode(dev, qp);
3396 		if (atomic_mode < 0)
3397 			return -EOPNOTSUPP;
3398 
3399 		MLX5_SET(qpc, qpc, rae, 1);
3400 		MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
3401 	}
3402 
3403 	MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3404 	return 0;
3405 }
3406 
3407 enum {
3408 	MLX5_PATH_FLAG_FL	= 1 << 0,
3409 	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
3410 	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
3411 };
3412 
3413 static int mlx5_to_ib_rate_map(u8 rate)
3414 {
3415 	static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
3416 				     IB_RATE_25_GBPS,	   IB_RATE_100_GBPS,
3417 				     IB_RATE_200_GBPS,	   IB_RATE_50_GBPS,
3418 				     IB_RATE_400_GBPS };
3419 
3420 	if (rate < ARRAY_SIZE(rates))
3421 		return rates[rate];
3422 
3423 	return rate - MLX5_STAT_RATE_OFFSET;
3424 }
3425 
3426 static int ib_to_mlx5_rate_map(u8 rate)
3427 {
3428 	switch (rate) {
3429 	case IB_RATE_PORT_CURRENT:
3430 		return 0;
3431 	case IB_RATE_56_GBPS:
3432 		return 1;
3433 	case IB_RATE_25_GBPS:
3434 		return 2;
3435 	case IB_RATE_100_GBPS:
3436 		return 3;
3437 	case IB_RATE_200_GBPS:
3438 		return 4;
3439 	case IB_RATE_50_GBPS:
3440 		return 5;
3441 	case IB_RATE_400_GBPS:
3442 		return 6;
3443 	default:
3444 		return rate + MLX5_STAT_RATE_OFFSET;
3445 	}
3446 
3447 	return 0;
3448 }
3449 
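/*
 * Convert an ib_rate to the mlx5 stat_rate encoding, stepping down to the
 * nearest rate the device reports as supported.
 */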
3450 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
3451 {
3452 	u32 stat_rate_support;
3453 
3454 	if (rate == IB_RATE_PORT_CURRENT)
3455 		return 0;
3456 
3457 	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
3458 		return -EINVAL;
3459 
3460 	stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
3461 	while (rate != IB_RATE_PORT_CURRENT &&
3462 	       !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support))
3463 		--rate;
3464 
3465 	return ib_to_mlx5_rate_map(rate);
3466 }
3467 
3468 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
3469 				      struct mlx5_ib_sq *sq, u8 sl,
3470 				      struct ib_pd *pd)
3471 {
3472 	void *in;
3473 	void *tisc;
3474 	int inlen;
3475 	int err;
3476 
3477 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
3478 	in = kvzalloc(inlen, GFP_KERNEL);
3479 	if (!in)
3480 		return -ENOMEM;
3481 
3482 	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
3483 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
3484 
3485 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
3486 	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
3487 
3488 	err = mlx5_core_modify_tis(dev, sq->tisn, in);
3489 
3490 	kvfree(in);
3491 
3492 	return err;
3493 }
3494 
3495 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
3496 					 struct mlx5_ib_sq *sq, u8 tx_affinity,
3497 					 struct ib_pd *pd)
3498 {
3499 	void *in;
3500 	void *tisc;
3501 	int inlen;
3502 	int err;
3503 
3504 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
3505 	in = kvzalloc(inlen, GFP_KERNEL);
3506 	if (!in)
3507 		return -ENOMEM;
3508 
3509 	MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
3510 	MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
3511 
3512 	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
3513 	MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
3514 
3515 	err = mlx5_core_modify_tis(dev, sq->tisn, in);
3516 
3517 	kvfree(in);
3518 
3519 	return err;
3520 }
3521 
3522 static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
3523 				    u32 lqpn, u32 rqpn)
3524 
3525 {
3526 	u32 fl = ah->grh.flow_label;
3527 
3528 	if (!fl)
3529 		fl = rdma_calc_flow_label(lqpn, rqpn);
3530 
3531 	MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
3532 }
3533 
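/*
 * Fill an address path (ads) entry in the QPC from an rdma_ah_attr, for
 * either the primary or the alternate path.
 */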
3534 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3535 			 const struct rdma_ah_attr *ah, void *path, u8 port,
3536 			 int attr_mask, u32 path_flags,
3537 			 const struct ib_qp_attr *attr, bool alt)
3538 {
3539 	const struct ib_global_route *grh = rdma_ah_read_grh(ah);
3540 	int err;
3541 	enum ib_gid_type gid_type;
3542 	u8 ah_flags = rdma_ah_get_ah_flags(ah);
3543 	u8 sl = rdma_ah_get_sl(ah);
3544 
3545 	if (attr_mask & IB_QP_PKEY_INDEX)
3546 		MLX5_SET(ads, path, pkey_index,
3547 			 alt ? attr->alt_pkey_index : attr->pkey_index);
3548 
3549 	if (ah_flags & IB_AH_GRH) {
3550 		const struct ib_port_immutable *immutable;
3551 
3552 		immutable = ib_port_immutable_read(&dev->ib_dev, port);
3553 		if (grh->sgid_index >= immutable->gid_tbl_len) {
3554 			pr_err("sgid_index (%u) too large. max is %d\n",
3555 			       grh->sgid_index,
3556 			       immutable->gid_tbl_len);
3557 			return -EINVAL;
3558 		}
3559 	}
3560 
3561 	if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
3562 		if (!(ah_flags & IB_AH_GRH))
3563 			return -EINVAL;
3564 
3565 		ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
3566 				ah->roce.dmac);
3567 		if ((qp->type == IB_QPT_RC ||
3568 		     qp->type == IB_QPT_UC ||
3569 		     qp->type == IB_QPT_XRC_INI ||
3570 		     qp->type == IB_QPT_XRC_TGT) &&
3571 		    (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
3572 		    (attr_mask & IB_QP_DEST_QPN))
3573 			mlx5_set_path_udp_sport(path, ah,
3574 						qp->ibqp.qp_num,
3575 						attr->dest_qp_num);
3576 		MLX5_SET(ads, path, eth_prio, sl & 0x7);
3577 		gid_type = ah->grh.sgid_attr->gid_type;
3578 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
3579 			MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
3580 	} else {
3581 		MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
3582 		MLX5_SET(ads, path, free_ar,
3583 			 !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
3584 		MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
3585 		MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
3586 		MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
3587 		MLX5_SET(ads, path, sl, sl);
3588 	}
3589 
3590 	if (ah_flags & IB_AH_GRH) {
3591 		MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
3592 		MLX5_SET(ads, path, hop_limit, grh->hop_limit);
3593 		MLX5_SET(ads, path, tclass, grh->traffic_class);
3594 		MLX5_SET(ads, path, flow_label, grh->flow_label);
3595 		memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
3596 		       sizeof(grh->dgid.raw));
3597 	}
3598 
3599 	err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
3600 	if (err < 0)
3601 		return err;
3602 	MLX5_SET(ads, path, stat_rate, err);
3603 	MLX5_SET(ads, path, vhca_port_num, port);
3604 
3605 	if (attr_mask & IB_QP_TIMEOUT)
3606 		MLX5_SET(ads, path, ack_timeout,
3607 			 alt ? attr->alt_timeout : attr->timeout);
3608 
3609 	if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
3610 		return modify_raw_packet_eth_prio(dev->mdev,
3611 						  &qp->raw_packet_qp.sq,
3612 						  sl & 0xf, qp->ibqp.pd);
3613 
3614 	return 0;
3615 }
3616 
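/*
 * Optional-parameter bits that firmware accepts for each
 * (current state, new state, service type) transition; used to mask the
 * optpar value built in __mlx5_ib_modify_qp().
 */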
3617 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
3618 	[MLX5_QP_STATE_INIT] = {
3619 		[MLX5_QP_STATE_INIT] = {
3620 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
3621 					  MLX5_QP_OPTPAR_RAE		|
3622 					  MLX5_QP_OPTPAR_RWE		|
3623 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3624 					  MLX5_QP_OPTPAR_PRI_PORT	|
3625 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3626 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
3627 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3628 					  MLX5_QP_OPTPAR_PRI_PORT	|
3629 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3630 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
3631 					  MLX5_QP_OPTPAR_Q_KEY		|
3632 					  MLX5_QP_OPTPAR_PRI_PORT,
3633 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3634 					  MLX5_QP_OPTPAR_RAE		|
3635 					  MLX5_QP_OPTPAR_RWE		|
3636 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3637 					  MLX5_QP_OPTPAR_PRI_PORT	|
3638 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3639 		},
3640 		[MLX5_QP_STATE_RTR] = {
3641 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3642 					  MLX5_QP_OPTPAR_RRE            |
3643 					  MLX5_QP_OPTPAR_RAE            |
3644 					  MLX5_QP_OPTPAR_RWE            |
3645 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3646 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3647 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
3648 					  MLX5_QP_OPTPAR_RWE            |
3649 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3650 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3651 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
3652 					  MLX5_QP_OPTPAR_Q_KEY,
3653 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
3654 					   MLX5_QP_OPTPAR_Q_KEY,
3655 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
3656 					  MLX5_QP_OPTPAR_RRE            |
3657 					  MLX5_QP_OPTPAR_RAE            |
3658 					  MLX5_QP_OPTPAR_RWE            |
3659 					  MLX5_QP_OPTPAR_PKEY_INDEX	|
3660 					  MLX5_QP_OPTPAR_LAG_TX_AFF,
3661 		},
3662 	},
3663 	[MLX5_QP_STATE_RTR] = {
3664 		[MLX5_QP_STATE_RTS] = {
3665 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3666 					  MLX5_QP_OPTPAR_RRE		|
3667 					  MLX5_QP_OPTPAR_RAE		|
3668 					  MLX5_QP_OPTPAR_RWE		|
3669 					  MLX5_QP_OPTPAR_PM_STATE	|
3670 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3671 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3672 					  MLX5_QP_OPTPAR_RWE		|
3673 					  MLX5_QP_OPTPAR_PM_STATE,
3674 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
3675 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
3676 					  MLX5_QP_OPTPAR_RRE		|
3677 					  MLX5_QP_OPTPAR_RAE		|
3678 					  MLX5_QP_OPTPAR_RWE		|
3679 					  MLX5_QP_OPTPAR_PM_STATE	|
3680 					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
3681 		},
3682 	},
3683 	[MLX5_QP_STATE_RTS] = {
3684 		[MLX5_QP_STATE_RTS] = {
3685 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
3686 					  MLX5_QP_OPTPAR_RAE		|
3687 					  MLX5_QP_OPTPAR_RWE		|
3688 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3689 					  MLX5_QP_OPTPAR_PM_STATE	|
3690 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3691 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
3692 					  MLX5_QP_OPTPAR_PM_STATE	|
3693 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3694 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
3695 					  MLX5_QP_OPTPAR_SRQN		|
3696 					  MLX5_QP_OPTPAR_CQN_RCV,
3697 			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE		|
3698 					  MLX5_QP_OPTPAR_RAE		|
3699 					  MLX5_QP_OPTPAR_RWE		|
3700 					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3701 					  MLX5_QP_OPTPAR_PM_STATE	|
3702 					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
3703 		},
3704 	},
3705 	[MLX5_QP_STATE_SQER] = {
3706 		[MLX5_QP_STATE_RTS] = {
3707 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
3708 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
3709 			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
3710 			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3711 					   MLX5_QP_OPTPAR_RWE		|
3712 					   MLX5_QP_OPTPAR_RAE		|
3713 					   MLX5_QP_OPTPAR_RRE,
3714 			[MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3715 					   MLX5_QP_OPTPAR_RWE		|
3716 					   MLX5_QP_OPTPAR_RAE		|
3717 					   MLX5_QP_OPTPAR_RRE,
3718 		},
3719 	},
3720 	[MLX5_QP_STATE_SQD] = {
3721 		[MLX5_QP_STATE_RTS] = {
3722 			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
3723 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
3724 			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
3725 			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
3726 					  MLX5_QP_OPTPAR_RWE		|
3727 					  MLX5_QP_OPTPAR_RAE		|
3728 					  MLX5_QP_OPTPAR_RRE,
3729 		},
3730 	},
3731 };
3732 
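/*
 * Translate a single IB_QP_* attribute mask bit to the corresponding
 * MLX5_QP_OPTPAR_* bits; attributes with no optional-parameter equivalent
 * map to 0.
 */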
3733 static int ib_nr_to_mlx5_nr(int ib_mask)
3734 {
3735 	switch (ib_mask) {
3736 	case IB_QP_STATE:
3737 		return 0;
3738 	case IB_QP_CUR_STATE:
3739 		return 0;
3740 	case IB_QP_EN_SQD_ASYNC_NOTIFY:
3741 		return 0;
3742 	case IB_QP_ACCESS_FLAGS:
3743 		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
3744 			MLX5_QP_OPTPAR_RAE;
3745 	case IB_QP_PKEY_INDEX:
3746 		return MLX5_QP_OPTPAR_PKEY_INDEX;
3747 	case IB_QP_PORT:
3748 		return MLX5_QP_OPTPAR_PRI_PORT;
3749 	case IB_QP_QKEY:
3750 		return MLX5_QP_OPTPAR_Q_KEY;
3751 	case IB_QP_AV:
3752 		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
3753 			MLX5_QP_OPTPAR_PRI_PORT;
3754 	case IB_QP_PATH_MTU:
3755 		return 0;
3756 	case IB_QP_TIMEOUT:
3757 		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
3758 	case IB_QP_RETRY_CNT:
3759 		return MLX5_QP_OPTPAR_RETRY_COUNT;
3760 	case IB_QP_RNR_RETRY:
3761 		return MLX5_QP_OPTPAR_RNR_RETRY;
3762 	case IB_QP_RQ_PSN:
3763 		return 0;
3764 	case IB_QP_MAX_QP_RD_ATOMIC:
3765 		return MLX5_QP_OPTPAR_SRA_MAX;
3766 	case IB_QP_ALT_PATH:
3767 		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
3768 	case IB_QP_MIN_RNR_TIMER:
3769 		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
3770 	case IB_QP_SQ_PSN:
3771 		return 0;
3772 	case IB_QP_MAX_DEST_RD_ATOMIC:
3773 		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
3774 			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
3775 	case IB_QP_PATH_MIG_STATE:
3776 		return MLX5_QP_OPTPAR_PM_STATE;
3777 	case IB_QP_CAP:
3778 		return 0;
3779 	case IB_QP_DEST_QPN:
3780 		return 0;
3781 	}
3782 	return 0;
3783 }
3784 
3785 static int ib_mask_to_mlx5_opt(int ib_mask)
3786 {
3787 	int result = 0;
3788 	int i;
3789 
3790 	for (i = 0; i < 8 * sizeof(int); i++) {
3791 		if ((1 << i) & ib_mask)
3792 			result |= ib_nr_to_mlx5_nr(1 << i);
3793 	}
3794 
3795 	return result;
3796 }
3797 
3798 static int modify_raw_packet_qp_rq(
3799 	struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
3800 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3801 {
3802 	void *in;
3803 	void *rqc;
3804 	int inlen;
3805 	int err;
3806 
3807 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
3808 	in = kvzalloc(inlen, GFP_KERNEL);
3809 	if (!in)
3810 		return -ENOMEM;
3811 
3812 	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
3813 	MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
3814 
3815 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
3816 	MLX5_SET(rqc, rqc, state, new_state);
3817 
3818 	if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
3819 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
3820 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
3821 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
3822 			MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
3823 		} else
3824 			dev_info_once(
3825 				&dev->ib_dev.dev,
3826 				"RAW PACKET QP counters are not supported on current FW\n");
3827 	}
3828 
3829 	err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
3830 	if (err)
3831 		goto out;
3832 
3833 	rq->state = new_state;
3834 
3835 out:
3836 	kvfree(in);
3837 	return err;
3838 }
3839 
3840 static int modify_raw_packet_qp_sq(
3841 	struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
3842 	const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
3843 {
3844 	struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
3845 	struct mlx5_rate_limit old_rl = ibqp->rl;
3846 	struct mlx5_rate_limit new_rl = old_rl;
3847 	bool new_rate_added = false;
3848 	u16 rl_index = 0;
3849 	void *in;
3850 	void *sqc;
3851 	int inlen;
3852 	int err;
3853 
3854 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
3855 	in = kvzalloc(inlen, GFP_KERNEL);
3856 	if (!in)
3857 		return -ENOMEM;
3858 
3859 	MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
3860 	MLX5_SET(modify_sq_in, in, sq_state, sq->state);
3861 
3862 	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
3863 	MLX5_SET(sqc, sqc, state, new_state);
3864 
3865 	if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
3866 		if (new_state != MLX5_SQC_STATE_RDY)
3867 			pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
3868 				__func__);
3869 		else
3870 			new_rl = raw_qp_param->rl;
3871 	}
3872 
3873 	if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
3874 		if (new_rl.rate) {
3875 			err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
3876 			if (err) {
3877 				pr_err("Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
3879 				       err, new_rl.rate, new_rl.max_burst_sz,
3880 				       new_rl.typical_pkt_sz);
3881 
3882 				goto out;
3883 			}
3884 			new_rate_added = true;
3885 		}
3886 
3887 		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
3888 		/* index 0 means no limit */
3889 		MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
3890 	}
3891 
3892 	err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
3893 	if (err) {
3894 		/* Remove new rate from table if failed */
3895 		/* Remove the new rate from the table if the modify failed */
3896 			mlx5_rl_remove_rate(dev, &new_rl);
3897 		goto out;
3898 	}
3899 
3900 	/* Only remove the old rate after new rate was set */
3901 	if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
3902 	    (new_state != MLX5_SQC_STATE_RDY)) {
3903 		mlx5_rl_remove_rate(dev, &old_rl);
3904 		if (new_state != MLX5_SQC_STATE_RDY)
3905 			memset(&new_rl, 0, sizeof(new_rl));
3906 	}
3907 
3908 	ibqp->rl = new_rl;
3909 	sq->state = new_state;
3910 
3911 out:
3912 	kvfree(in);
3913 	return err;
3914 }
3915 
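/*
 * A raw packet QP is backed by separate RQ and SQ objects, so a QP state
 * transition is translated into the matching RQ/SQ state changes and
 * applied to each object individually.
 */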
3916 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
3917 				const struct mlx5_modify_raw_qp_param *raw_qp_param,
3918 				u8 tx_affinity)
3919 {
3920 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
3921 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
3922 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
3923 	int modify_rq = !!qp->rq.wqe_cnt;
3924 	int modify_sq = !!qp->sq.wqe_cnt;
3925 	int rq_state;
3926 	int sq_state;
3927 	int err;
3928 
3929 	switch (raw_qp_param->operation) {
3930 	case MLX5_CMD_OP_RST2INIT_QP:
3931 		rq_state = MLX5_RQC_STATE_RDY;
3932 		sq_state = MLX5_SQC_STATE_RST;
3933 		break;
3934 	case MLX5_CMD_OP_2ERR_QP:
3935 		rq_state = MLX5_RQC_STATE_ERR;
3936 		sq_state = MLX5_SQC_STATE_ERR;
3937 		break;
3938 	case MLX5_CMD_OP_2RST_QP:
3939 		rq_state = MLX5_RQC_STATE_RST;
3940 		sq_state = MLX5_SQC_STATE_RST;
3941 		break;
3942 	case MLX5_CMD_OP_RTR2RTS_QP:
3943 	case MLX5_CMD_OP_RTS2RTS_QP:
3944 		if (raw_qp_param->set_mask & ~MLX5_RAW_QP_RATE_LIMIT)
3945 			return -EINVAL;
3946 
3947 		modify_rq = 0;
3948 		sq_state = MLX5_SQC_STATE_RDY;
3949 		break;
3950 	case MLX5_CMD_OP_INIT2INIT_QP:
3951 	case MLX5_CMD_OP_INIT2RTR_QP:
3952 		if (raw_qp_param->set_mask)
3953 			return -EINVAL;
3954 		else
3955 			return 0;
3956 	default:
3957 		WARN_ON(1);
3958 		return -EINVAL;
3959 	}
3960 
3961 	if (modify_rq) {
3962 		err =  modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
3963 					       qp->ibqp.pd);
3964 		if (err)
3965 			return err;
3966 	}
3967 
3968 	if (modify_sq) {
3969 		struct mlx5_flow_handle *flow_rule;
3970 
3971 		if (tx_affinity) {
3972 			err = modify_raw_packet_tx_affinity(dev->mdev, sq,
3973 							    tx_affinity,
3974 							    qp->ibqp.pd);
3975 			if (err)
3976 				return err;
3977 		}
3978 
3979 		flow_rule = create_flow_rule_vport_sq(dev, sq,
3980 						      raw_qp_param->port);
3981 		if (IS_ERR(flow_rule))
3982 			return PTR_ERR(flow_rule);
3983 
3984 		err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
3985 					      raw_qp_param, qp->ibqp.pd);
3986 		if (err) {
3987 			if (flow_rule)
3988 				mlx5_del_flow_rules(flow_rule);
3989 			return err;
3990 		}
3991 
3992 		if (flow_rule) {
3993 			destroy_flow_rule_vport_sq(sq);
3994 			sq->flow_rule = flow_rule;
3995 		}
3996 
3997 		return err;
3998 	}
3999 
4000 	return 0;
4001 }
4002 
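/*
 * Round-robin selection of a LAG TX port, using a per-ucontext counter when
 * available and a per-device-port counter otherwise. The returned port
 * number is 1-based.
 */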
4003 static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
4004 				       struct ib_udata *udata)
4005 {
4006 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
4007 		udata, struct mlx5_ib_ucontext, ibucontext);
4008 	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4009 	atomic_t *tx_port_affinity;
4010 
4011 	if (ucontext)
4012 		tx_port_affinity = &ucontext->tx_port_affinity;
4013 	else
4014 		tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
4015 
4016 	return (unsigned int)atomic_add_return(1, tx_port_affinity) %
4017 		(dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
4018 }
4019 
4020 static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
4021 {
4022 	if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
4023 	    (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
4024 	    (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
4025 	    (qp->type == MLX5_IB_QPT_DCI))
4026 		return true;
4027 	return false;
4028 }
4029 
4030 static unsigned int get_tx_affinity(struct ib_qp *qp,
4031 				    const struct ib_qp_attr *attr,
4032 				    int attr_mask, u8 init,
4033 				    struct ib_udata *udata)
4034 {
4035 	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
4036 		udata, struct mlx5_ib_ucontext, ibucontext);
4037 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
4038 	struct mlx5_ib_qp *mqp = to_mqp(qp);
4039 	struct mlx5_ib_qp_base *qp_base;
4040 	unsigned int tx_affinity;
4041 
4042 	if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
4043 	      qp_supports_affinity(mqp)))
4044 		return 0;
4045 
4046 	if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
4047 		tx_affinity = mqp->gsi_lag_port;
4048 	else if (init)
4049 		tx_affinity = get_tx_affinity_rr(dev, udata);
4050 	else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
4051 		tx_affinity =
4052 			mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
4053 	else
4054 		return 0;
4055 
4056 	qp_base = &mqp->trans_qp.base;
4057 	if (ucontext)
4058 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
4059 			    tx_affinity, qp_base->mqp.qpn, ucontext);
4060 	else
4061 		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
4062 			    tx_affinity, qp_base->mqp.qpn);
4063 	return tx_affinity;
4064 }
4065 
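/*
 * Bind a counter set to the RQ of a raw packet QP; nothing to do when the
 * QP has no receive queue.
 */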
4066 static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
4067 					   struct mlx5_core_dev *mdev)
4068 {
4069 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
4070 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
4071 	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
4072 	void *rqc;
4073 
4074 	if (!qp->rq.wqe_cnt)
4075 		return 0;
4076 
4077 	MLX5_SET(modify_rq_in, in, rq_state, rq->state);
4078 	MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
4079 
4080 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
4081 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
4082 
4083 	MLX5_SET64(modify_rq_in, in, modify_bitmask,
4084 		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
4085 	MLX5_SET(rqc, rqc, counter_set_id, set_id);
4086 
4087 	return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
4088 }
4089 
4090 static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
4091 				    struct rdma_counter *counter)
4092 {
4093 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
4094 	u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
4095 	struct mlx5_ib_qp *mqp = to_mqp(qp);
4096 	struct mlx5_ib_qp_base *base;
4097 	u32 set_id;
4098 	u32 *qpc;
4099 
4100 	if (counter)
4101 		set_id = counter->id;
4102 	else
4103 		set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
4104 
4105 	if (mqp->type == IB_QPT_RAW_PACKET)
4106 		return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
4107 
4108 	base = &mqp->trans_qp.base;
4109 	MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
4110 	MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
4111 	MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
4112 	MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
4113 		 MLX5_QP_OPTPAR_COUNTER_SET_ID);
4114 
4115 	qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
4116 	MLX5_SET(qpc, qpc, counter_set_id, set_id);
4117 	return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
4118 }
4119 
4120 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
4121 			       const struct ib_qp_attr *attr, int attr_mask,
4122 			       enum ib_qp_state cur_state,
4123 			       enum ib_qp_state new_state,
4124 			       const struct mlx5_ib_modify_qp *ucmd,
4125 			       struct mlx5_ib_modify_qp_resp *resp,
4126 			       struct ib_udata *udata)
4127 {
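	/*
	 * Firmware command used for each supported (current state, new state)
	 * transition; a zero entry marks an unsupported transition.
	 */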
4128 	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
4129 		[MLX5_QP_STATE_RST] = {
4130 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4131 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4132 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
4133 		},
4134 		[MLX5_QP_STATE_INIT]  = {
4135 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4136 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4137 			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
4138 			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
4139 		},
4140 		[MLX5_QP_STATE_RTR]   = {
4141 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4142 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4143 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
4144 		},
4145 		[MLX5_QP_STATE_RTS]   = {
4146 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4147 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4148 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
4149 		},
4150 		[MLX5_QP_STATE_SQD] = {
4151 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4152 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4153 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQD_RTS_QP,
4154 		},
4155 		[MLX5_QP_STATE_SQER] = {
4156 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4157 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4158 			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
4159 		},
4160 		[MLX5_QP_STATE_ERR] = {
4161 			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
4162 			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
4163 		}
4164 	};
4165 
4166 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4167 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
4168 	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
4169 	struct mlx5_ib_cq *send_cq, *recv_cq;
4170 	struct mlx5_ib_pd *pd;
4171 	enum mlx5_qp_state mlx5_cur, mlx5_new;
4172 	void *qpc, *pri_path, *alt_path;
4173 	enum mlx5_qp_optpar optpar = 0;
4174 	u32 set_id = 0;
4175 	int mlx5_st;
4176 	int err;
4177 	u16 op;
4178 	u8 tx_affinity = 0;
4179 
4180 	mlx5_st = to_mlx5_st(qp->type);
4181 	if (mlx5_st < 0)
4182 		return -EINVAL;
4183 
4184 	qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
4185 	if (!qpc)
4186 		return -ENOMEM;
4187 
4188 	pd = to_mpd(qp->ibqp.pd);
4189 	MLX5_SET(qpc, qpc, st, mlx5_st);
4190 
4191 	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
4192 		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
4193 	} else {
4194 		switch (attr->path_mig_state) {
4195 		case IB_MIG_MIGRATED:
4196 			MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
4197 			break;
4198 		case IB_MIG_REARM:
4199 			MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
4200 			break;
4201 		case IB_MIG_ARMED:
4202 			MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
4203 			break;
4204 		}
4205 	}
4206 
4207 	tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
4208 				      cur_state == IB_QPS_RESET &&
4209 				      new_state == IB_QPS_INIT, udata);
4210 
4211 	MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
4212 	if (tx_affinity && new_state == IB_QPS_RTR &&
4213 	    MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
4214 		optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
4215 
4216 	if (is_sqp(qp->type)) {
4217 		MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
4218 		MLX5_SET(qpc, qpc, log_msg_max, 8);
4219 	} else if ((qp->type == IB_QPT_UD &&
4220 		    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
4221 		   qp->type == MLX5_IB_QPT_REG_UMR) {
4222 		MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
4223 		MLX5_SET(qpc, qpc, log_msg_max, 12);
4224 	} else if (attr_mask & IB_QP_PATH_MTU) {
4225 		if (attr->path_mtu < IB_MTU_256 ||
4226 		    attr->path_mtu > IB_MTU_4096) {
4227 			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
4228 			err = -EINVAL;
4229 			goto out;
4230 		}
4231 		MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
4232 		MLX5_SET(qpc, qpc, log_msg_max,
4233 			 MLX5_CAP_GEN(dev->mdev, log_max_msg));
4234 	}
4235 
4236 	if (attr_mask & IB_QP_DEST_QPN)
4237 		MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
4238 
4239 	pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
4240 	alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
4241 
4242 	if (attr_mask & IB_QP_PKEY_INDEX)
4243 		MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
4244 
4245 	/* TODO: implement counter_index functionality */
4246 
4247 	if (dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI && is_qp0(qp->type)) {
4248 		MLX5_SET(ads, pri_path, vhca_port_num,
4249 			 smi_to_native_portnum(dev, qp->port));
4250 		if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)
4251 			MLX5_SET(ads, pri_path, plane_index, qp->port);
4252 	} else if (is_sqp(qp->type))
4253 		MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
4254 
4255 	if (attr_mask & IB_QP_PORT)
4256 		MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
4257 
4258 	if (attr_mask & IB_QP_AV) {
4259 		err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
4260 				    attr_mask & IB_QP_PORT ? attr->port_num :
4261 							     qp->port,
4262 				    attr_mask, 0, attr, false);
4263 		if (err)
4264 			goto out;
4265 	}
4266 
4267 	if (attr_mask & IB_QP_TIMEOUT)
4268 		MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
4269 
4270 	if (attr_mask & IB_QP_ALT_PATH) {
4271 		err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
4272 				    attr->alt_port_num,
4273 				    attr_mask | IB_QP_PKEY_INDEX |
4274 					    IB_QP_TIMEOUT,
4275 				    0, attr, true);
4276 		if (err)
4277 			goto out;
4278 	}
4279 
4280 	get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
4281 		&send_cq, &recv_cq);
4282 
4283 	MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
4284 	if (send_cq)
4285 		MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
4286 	if (recv_cq)
4287 		MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
4288 
4289 	MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
4290 
4291 	if (attr_mask & IB_QP_RNR_RETRY)
4292 		MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
4293 
4294 	if (attr_mask & IB_QP_RETRY_CNT)
4295 		MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
4296 
4297 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
4298 		MLX5_SET(qpc, qpc, log_sra_max, fls(attr->max_rd_atomic - 1));
4299 
4300 	if (attr_mask & IB_QP_SQ_PSN)
4301 		MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
4302 
4303 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
4304 		MLX5_SET(qpc, qpc, log_rra_max,
4305 			 fls(attr->max_dest_rd_atomic - 1));
4306 
4307 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
4308 		err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
4309 		if (err)
4310 			goto out;
4311 	}
4312 
4313 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
4314 		MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
4315 
4316 	if (attr_mask & IB_QP_RQ_PSN)
4317 		MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
4318 
4319 	if (attr_mask & IB_QP_QKEY)
4320 		MLX5_SET(qpc, qpc, q_key, attr->qkey);
4321 
4322 	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
4323 		MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
4324 
4325 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4326 		u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
4327 			       qp->port) - 1;
4328 
4329 		/* Underlay port should be used - index 0 function per port */
4330 		if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
4331 			port_num = 0;
4332 
4333 		if (ibqp->counter)
4334 			set_id = ibqp->counter->id;
4335 		else
4336 			set_id = mlx5_ib_get_counters_id(dev, port_num);
4337 		MLX5_SET(qpc, qpc, counter_set_id, set_id);
4338 	}
4339 
4340 	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
4341 		MLX5_SET(qpc, qpc, rlky, 1);
4342 
4343 	if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
4344 		MLX5_SET(qpc, qpc, deth_sqpn, 1);
4345 
4346 	if (qp->is_ooo_rq && cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4347 		MLX5_SET(qpc, qpc, dp_ordering_1, 1);
4348 		MLX5_SET(qpc, qpc, dp_ordering_force, 1);
4349 	}
4350 
4351 	mlx5_cur = to_mlx5_state(cur_state);
4352 	mlx5_new = to_mlx5_state(new_state);
4353 
4354 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
4355 	    !optab[mlx5_cur][mlx5_new]) {
4356 		err = -EINVAL;
4357 		goto out;
4358 	}
4359 
4360 	op = optab[mlx5_cur][mlx5_new];
4361 	optpar |= ib_mask_to_mlx5_opt(attr_mask);
4362 	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
4363 
4364 	if (qp->type == IB_QPT_RAW_PACKET ||
4365 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4366 		struct mlx5_modify_raw_qp_param raw_qp_param = {};
4367 
4368 		raw_qp_param.operation = op;
4369 		if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4370 			raw_qp_param.rq_q_ctr_id = set_id;
4371 			raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
4372 		}
4373 
4374 		if (attr_mask & IB_QP_PORT)
4375 			raw_qp_param.port = attr->port_num;
4376 
4377 		if (attr_mask & IB_QP_RATE_LIMIT) {
4378 			raw_qp_param.rl.rate = attr->rate_limit;
4379 
4380 			if (ucmd->burst_info.max_burst_sz) {
4381 				if (attr->rate_limit &&
4382 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
4383 					raw_qp_param.rl.max_burst_sz =
4384 						ucmd->burst_info.max_burst_sz;
4385 				} else {
4386 					err = -EINVAL;
4387 					goto out;
4388 				}
4389 			}
4390 
4391 			if (ucmd->burst_info.typical_pkt_sz) {
4392 				if (attr->rate_limit &&
4393 				    MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
4394 					raw_qp_param.rl.typical_pkt_sz =
4395 						ucmd->burst_info.typical_pkt_sz;
4396 				} else {
4397 					err = -EINVAL;
4398 					goto out;
4399 				}
4400 			}
4401 
4402 			raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
4403 		}
4404 
4405 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
4406 	} else {
4407 		if (udata) {
4408 			/* For the kernel flows, the resp will stay zero */
4409 			resp->ece_options =
4410 				MLX5_CAP_GEN(dev->mdev, ece_support) ?
4411 					ucmd->ece_options : 0;
4412 			resp->response_length = sizeof(*resp);
4413 		}
4414 		err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
4415 					  &resp->ece_options);
4416 	}
4417 
4418 	if (err)
4419 		goto out;
4420 
4421 	qp->state = new_state;
4422 
4423 	if (attr_mask & IB_QP_ACCESS_FLAGS)
4424 		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
4425 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4426 		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
4427 	if (attr_mask & IB_QP_PORT)
4428 		qp->port = attr->port_num;
4429 	if (attr_mask & IB_QP_ALT_PATH)
4430 		qp->trans_qp.alt_port = attr->alt_port_num;
4431 
4432 	/*
4433 	 * If we moved a kernel QP to RESET, clean up all old CQ
4434 	 * entries and reinitialize the QP.
4435 	 */
4436 	if (new_state == IB_QPS_RESET &&
4437 	    !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
4438 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
4439 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
4440 		if (send_cq != recv_cq)
4441 			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
4442 
4443 		qp->rq.head = 0;
4444 		qp->rq.tail = 0;
4445 		qp->sq.head = 0;
4446 		qp->sq.tail = 0;
4447 		qp->sq.cur_post = 0;
4448 		if (qp->sq.wqe_cnt)
4449 			qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
4450 		qp->sq.last_poll = 0;
4451 		qp->db.db[MLX5_RCV_DBR] = 0;
4452 		qp->db.db[MLX5_SND_DBR] = 0;
4453 	}
4454 
4455 	if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
4456 		err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
4457 		if (!err)
4458 			qp->counter_pending = 0;
4459 	}
4460 
4461 out:
4462 	kfree(qpc);
4463 	return err;
4464 }
4465 
4466 static inline bool is_valid_mask(int mask, int req, int opt)
4467 {
4468 	if ((mask & req) != req)
4469 		return false;
4470 
4471 	if (mask & ~(req | opt))
4472 		return false;
4473 
4474 	return true;
4475 }
4476 
4477 /* Check valid transitions for driver QP types.
4478  * For now, DCI is the only QP type that this function supports.
4479  */
4480 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
4481 				enum ib_qp_attr_mask attr_mask)
4482 {
4483 	int req = IB_QP_STATE;
4484 	int opt = 0;
4485 
4486 	if (new_state == IB_QPS_RESET) {
4487 		return is_valid_mask(attr_mask, req, opt);
4488 	} else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4489 		req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
4490 		return is_valid_mask(attr_mask, req, opt);
4491 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4492 		opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
4493 		return is_valid_mask(attr_mask, req, opt);
4494 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4495 		req |= IB_QP_PATH_MTU;
4496 		opt = IB_QP_PKEY_INDEX | IB_QP_AV;
4497 		return is_valid_mask(attr_mask, req, opt);
4498 	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4499 		req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4500 		       IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
4501 		opt = IB_QP_MIN_RNR_TIMER;
4502 		return is_valid_mask(attr_mask, req, opt);
4503 	} else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
4504 		opt = IB_QP_MIN_RNR_TIMER;
4505 		return is_valid_mask(attr_mask, req, opt);
4506 	} else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
4507 		return is_valid_mask(attr_mask, req, opt);
4508 	}
4509 	return false;
4510 }
4511 
4512 /* mlx5_ib_modify_dct: modify a DCT QP
4513  * valid transitions are:
4514  * RESET to INIT: must set access_flags, pkey_index and port
4515  * INIT  to RTR : must set min_rnr_timer, tclass, flow_label,
4516  *			   mtu, gid_index and hop_limit
4517  * Other transitions and attributes are illegal
4518  */
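/*
 * Illustrative caller sequence (a sketch, not part of the driver): a DCT
 * target is typically driven through the two legal transitions with exactly
 * the attribute masks validated below, e.g.
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
 *	attr.pkey_index = 0;
 *	attr.port_num = 1;
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
 *				IB_QP_PKEY_INDEX | IB_QP_PORT);
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	attr.path_mtu = IB_MTU_4096;
 *	attr.min_rnr_timer = 12;
 *	attr.ah_attr = ...;  (GRH: tclass, flow_label, sgid_index, hop_limit)
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PATH_MTU |
 *				IB_QP_MIN_RNR_TIMER | IB_QP_AV);
 */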
4519 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4520 			      int attr_mask, struct mlx5_ib_modify_qp *ucmd,
4521 			      struct ib_udata *udata)
4522 {
4523 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
4524 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4525 	enum ib_qp_state cur_state, new_state;
4526 	int required = IB_QP_STATE;
4527 	void *dctc;
4528 	int err;
4529 
4530 	if (!(attr_mask & IB_QP_STATE))
4531 		return -EINVAL;
4532 
4533 	cur_state = qp->state;
4534 	new_state = attr->qp_state;
4535 
4536 	dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
4537 	if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
4538 		/*
4539 		 * A DCT QP is not initialized in hardware until the modify
4540 		 * command is executed, so overwrite any previously set ECE
4541 		 * field if the user provided a non-zero value (zero means
4542 		 * not set/not valid).
4543 		 */
4544 		MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
4545 
4546 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4547 		u16 set_id;
4548 
4549 		required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
4550 		if (!is_valid_mask(attr_mask, required, 0))
4551 			return -EINVAL;
4552 
4553 		if (attr->port_num == 0 ||
4554 		    attr->port_num > dev->num_ports) {
4555 			mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
4556 				    attr->port_num, dev->num_ports);
4557 			return -EINVAL;
4558 		}
4559 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
4560 			MLX5_SET(dctc, dctc, rre, 1);
4561 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
4562 			MLX5_SET(dctc, dctc, rwe, 1);
4563 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
4564 			int atomic_mode;
4565 
4566 			atomic_mode = get_atomic_mode(dev, qp);
4567 			if (atomic_mode < 0)
4568 				return -EOPNOTSUPP;
4569 
4570 			MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
4571 			MLX5_SET(dctc, dctc, rae, 1);
4572 		}
4573 		MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
4574 		if (mlx5_lag_is_active(dev->mdev))
4575 			MLX5_SET(dctc, dctc, port,
4576 				 get_tx_affinity_rr(dev, udata));
4577 		else
4578 			MLX5_SET(dctc, dctc, port, attr->port_num);
4579 
4580 		set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
4581 		MLX5_SET(dctc, dctc, counter_set_id, set_id);
4582 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4583 		struct mlx5_ib_modify_qp_resp resp = {};
4584 		u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
4585 		u32 min_resp_len = offsetofend(typeof(resp), dctn);
4586 
4587 		if (udata->outlen < min_resp_len)
4588 			return -EINVAL;
4589 		/*
4590 		 * If we don't have enough space for the ECE options,
4591 		 * simply indicate it with resp.response_length.
4592 		 */
4593 		resp.response_length = (udata->outlen < sizeof(resp)) ?
4594 					       min_resp_len :
4595 					       sizeof(resp);
4596 
4597 		required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
4598 		if (!is_valid_mask(attr_mask, required, 0))
4599 			return -EINVAL;
4600 		MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
4601 		MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
4602 		MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
4603 		MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
4604 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
4605 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
4606 		if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
4607 			MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
4608 		if (qp->is_ooo_rq) {
4609 			MLX5_SET(dctc, dctc, dp_ordering_1, 1);
4610 			MLX5_SET(dctc, dctc, dp_ordering_force, 1);
4611 		}
4612 
4613 		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
4614 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
4615 					   sizeof(out));
4616 		err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
4617 		if (err)
4618 			return err;
4619 		resp.dctn = qp->dct.mdct.mqp.qpn;
4620 		if (MLX5_CAP_GEN(dev->mdev, ece_support))
4621 			resp.ece_options = MLX5_GET(create_dct_out, out, ece);
4622 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
4623 		if (err) {
4624 			mlx5_core_destroy_dct(dev, &qp->dct.mdct);
4625 			return err;
4626 		}
4627 	} else {
4628 		mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
4629 		return -EINVAL;
4630 	}
4631 
4632 	qp->state = new_state;
4633 	return 0;
4634 }
4635 
4636 static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
4637 				      struct mlx5_ib_qp *qp)
4638 {
4639 	if (dev->profile != &raw_eth_profile)
4640 		return true;
4641 
4642 	if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
4643 		return true;
4644 
4645 	return false;
4646 }
4647 
4648 static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
4649 			      int attr_mask, enum ib_qp_type qp_type)
4650 {
4651 	int log_max_ra_res;
4652 	int log_max_ra_req;
4653 
4654 	if (qp_type == MLX5_IB_QPT_DCI) {
4655 		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
4656 						   log_max_ra_res_dc);
4657 		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
4658 						   log_max_ra_req_dc);
4659 	} else {
4660 		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
4661 						   log_max_ra_res_qp);
4662 		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
4663 						   log_max_ra_req_qp);
4664 	}
4665 
4666 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
4667 	    attr->max_rd_atomic > log_max_ra_res) {
4668 		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
4669 			    attr->max_rd_atomic);
4670 		return false;
4671 	}
4672 
4673 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
4674 	    attr->max_dest_rd_atomic > log_max_ra_req) {
4675 		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
4676 			    attr->max_dest_rd_atomic);
4677 		return false;
4678 	}
4679 	return true;
4680 }
4681 
4682 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
4683 		      int attr_mask, struct ib_udata *udata)
4684 {
4685 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4686 	struct mlx5_ib_modify_qp_resp resp = {};
4687 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
4688 	struct mlx5_ib_modify_qp ucmd = {};
4689 	enum ib_qp_type qp_type;
4690 	enum ib_qp_state cur_state, new_state;
4691 	int err = -EINVAL;
4692 
4693 	if (!mlx5_ib_modify_qp_allowed(dev, qp))
4694 		return -EOPNOTSUPP;
4695 
4696 	if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
4697 		return -EOPNOTSUPP;
4698 
4699 	if (ibqp->rwq_ind_tbl)
4700 		return -ENOSYS;
4701 
4702 	if (udata && udata->inlen) {
4703 		if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
4704 			return -EINVAL;
4705 
4706 		if (udata->inlen > sizeof(ucmd) &&
4707 		    !ib_is_udata_cleared(udata, sizeof(ucmd),
4708 					 udata->inlen - sizeof(ucmd)))
4709 			return -EOPNOTSUPP;
4710 
4711 		if (ib_copy_from_udata(&ucmd, udata,
4712 				       min(udata->inlen, sizeof(ucmd))))
4713 			return -EFAULT;
4714 
4715 		if (ucmd.comp_mask & ~MLX5_IB_MODIFY_QP_OOO_DP ||
4716 		    memchr_inv(&ucmd.burst_info.reserved, 0,
4717 			       sizeof(ucmd.burst_info.reserved)))
4718 			return -EOPNOTSUPP;
4719 
4720 		if (ucmd.comp_mask & MLX5_IB_MODIFY_QP_OOO_DP) {
4721 			if (!get_dp_ooo_cap(dev->mdev, qp->type))
4722 				return -EOPNOTSUPP;
4723 			qp->is_ooo_rq = 1;
4724 		}
4725 	}
4726 
4727 	if (qp->type == IB_QPT_GSI)
4728 		return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
4729 
4730 	qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
4731 
4732 	if (qp_type == MLX5_IB_QPT_DCT)
4733 		return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
4734 
4735 	mutex_lock(&qp->mutex);
4736 
4737 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
4738 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
4739 
4740 	if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
4741 		if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
4742 			mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
4743 				    attr_mask);
4744 			goto out;
4745 		}
4746 	} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
4747 		   qp_type != MLX5_IB_QPT_DCI &&
4748 		   !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
4749 				       attr_mask)) {
4750 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4751 			    cur_state, new_state, qp->type, attr_mask);
4752 		goto out;
4753 	} else if (qp_type == MLX5_IB_QPT_DCI &&
4754 		   !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
4755 		mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
4756 			    cur_state, new_state, qp_type, attr_mask);
4757 		goto out;
4758 	}
4759 
4760 	if ((attr_mask & IB_QP_PORT) &&
4761 	    (attr->port_num == 0 ||
4762 	     attr->port_num > dev->num_ports)) {
4763 		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
4764 			    attr->port_num, dev->num_ports);
4765 		goto out;
4766 	}
4767 
4768 	if ((attr_mask & IB_QP_PKEY_INDEX) &&
4769 	    attr->pkey_index >= dev->pkey_table_len) {
4770 		mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
4771 		goto out;
4772 	}
4773 
4774 	if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
4775 		goto out;
4776 
4777 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
4778 		err = 0;
4779 		goto out;
4780 	}
4781 
4782 	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
4783 				  new_state, &ucmd, &resp, udata);
4784 
4785 	/* resp.response_length is set in ECE supported flows only */
4786 	if (!err && resp.response_length &&
4787 	    udata->outlen >= resp.response_length)
4788 		/* Return -EFAULT to the user and expect them to destroy the QP. */
4789 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
4790 
4791 out:
4792 	mutex_unlock(&qp->mutex);
4793 	return err;
4794 }
4795 
4796 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
4797 {
4798 	switch (mlx5_state) {
4799 	case MLX5_QP_STATE_RST:      return IB_QPS_RESET;
4800 	case MLX5_QP_STATE_INIT:     return IB_QPS_INIT;
4801 	case MLX5_QP_STATE_RTR:      return IB_QPS_RTR;
4802 	case MLX5_QP_STATE_RTS:      return IB_QPS_RTS;
4803 	case MLX5_QP_STATE_SQ_DRAINING:
4804 	case MLX5_QP_STATE_SQD:      return IB_QPS_SQD;
4805 	case MLX5_QP_STATE_SQER:     return IB_QPS_SQE;
4806 	case MLX5_QP_STATE_ERR:      return IB_QPS_ERR;
4807 	default:		     return -1;
4808 	}
4809 }
4810 
4811 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
4812 {
4813 	switch (mlx5_mig_state) {
4814 	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
4815 	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
4816 	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
4817 	default: return -1;
4818 	}
4819 }
4820 
4821 static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
4822 			    struct rdma_ah_attr *ah_attr, void *path)
4823 {
4824 	int port = MLX5_GET(ads, path, vhca_port_num);
4825 	int static_rate;
4826 
4827 	memset(ah_attr, 0, sizeof(*ah_attr));
4828 
4829 	if (!port || port > ibdev->num_ports)
4830 		return;
4831 
4832 	ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
4833 
4834 	rdma_ah_set_port_num(ah_attr, port);
4835 	rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
4836 
4837 	rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
4838 	rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
4839 
4840 	static_rate = MLX5_GET(ads, path, stat_rate);
4841 	rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
4842 	if (MLX5_GET(ads, path, grh) ||
4843 	    ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
4844 		rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
4845 				MLX5_GET(ads, path, src_addr_index),
4846 				MLX5_GET(ads, path, hop_limit),
4847 				MLX5_GET(ads, path, tclass));
4848 		rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
4849 	}
4850 }
4851 
4852 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
4853 					struct mlx5_ib_sq *sq,
4854 					u8 *sq_state)
4855 {
4856 	int err;
4857 
4858 	err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
4859 	if (err)
4860 		goto out;
4861 	sq->state = *sq_state;
4862 
4863 out:
4864 	return err;
4865 }
4866 
4867 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
4868 					struct mlx5_ib_rq *rq,
4869 					u8 *rq_state)
4870 {
4871 	void *out;
4872 	void *rqc;
4873 	int inlen;
4874 	int err;
4875 
4876 	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
4877 	out = kvzalloc(inlen, GFP_KERNEL);
4878 	if (!out)
4879 		return -ENOMEM;
4880 
4881 	err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
4882 	if (err)
4883 		goto out;
4884 
4885 	rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
4886 	*rq_state = MLX5_GET(rqc, rqc, state);
4887 	rq->state = *rq_state;
4888 
4889 out:
4890 	kvfree(out);
4891 	return err;
4892 }
4893 
4894 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
4895 				  struct mlx5_ib_qp *qp, u8 *qp_state)
4896 {
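	/*
	 * Map the (RQ state, SQ state) pair of a raw packet QP back to an IB
	 * QP state. MLX5_QP_STATE means "keep the current software state";
	 * MLX5_QP_STATE_BAD marks an inconsistent hardware combination.
	 */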
4897 	static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
4898 		[MLX5_RQC_STATE_RST] = {
4899 			[MLX5_SQC_STATE_RST]	= IB_QPS_RESET,
4900 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
4901 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE_BAD,
4902 			[MLX5_SQ_STATE_NA]	= IB_QPS_RESET,
4903 		},
4904 		[MLX5_RQC_STATE_RDY] = {
4905 			[MLX5_SQC_STATE_RST]	= MLX5_QP_STATE,
4906 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
4907 			[MLX5_SQC_STATE_ERR]	= IB_QPS_SQE,
4908 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE,
4909 		},
4910 		[MLX5_RQC_STATE_ERR] = {
4911 			[MLX5_SQC_STATE_RST]    = MLX5_QP_STATE_BAD,
4912 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE_BAD,
4913 			[MLX5_SQC_STATE_ERR]	= IB_QPS_ERR,
4914 			[MLX5_SQ_STATE_NA]	= IB_QPS_ERR,
4915 		},
4916 		[MLX5_RQ_STATE_NA] = {
4917 			[MLX5_SQC_STATE_RST]    = MLX5_QP_STATE,
4918 			[MLX5_SQC_STATE_RDY]	= MLX5_QP_STATE,
4919 			[MLX5_SQC_STATE_ERR]	= MLX5_QP_STATE,
4920 			[MLX5_SQ_STATE_NA]	= MLX5_QP_STATE_BAD,
4921 		},
4922 	};
4923 
4924 	*qp_state = sqrq_trans[rq_state][sq_state];
4925 
4926 	if (*qp_state == MLX5_QP_STATE_BAD) {
4927 		WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
4928 		     qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
4929 		     qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
4930 		return -EINVAL;
4931 	}
4932 
4933 	if (*qp_state == MLX5_QP_STATE)
4934 		*qp_state = qp->state;
4935 
4936 	return 0;
4937 }
4938 
4939 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
4940 				     struct mlx5_ib_qp *qp,
4941 				     u8 *raw_packet_qp_state)
4942 {
4943 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
4944 	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
4945 	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
4946 	int err;
4947 	u8 sq_state = MLX5_SQ_STATE_NA;
4948 	u8 rq_state = MLX5_RQ_STATE_NA;
4949 
4950 	if (qp->sq.wqe_cnt) {
4951 		err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
4952 		if (err)
4953 			return err;
4954 	}
4955 
4956 	if (qp->rq.wqe_cnt) {
4957 		err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
4958 		if (err)
4959 			return err;
4960 	}
4961 
4962 	return sqrq_state_to_qp_state(sq_state, rq_state, qp,
4963 				      raw_packet_qp_state);
4964 }
4965 
4966 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
4967 			 struct ib_qp_attr *qp_attr)
4968 {
4969 	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
4970 	void *qpc, *pri_path, *alt_path;
4971 	u32 *outb;
4972 	int err;
4973 
4974 	outb = kzalloc(outlen, GFP_KERNEL);
4975 	if (!outb)
4976 		return -ENOMEM;
4977 
4978 	err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
4979 				 false);
4980 	if (err)
4981 		goto out;
4982 
4983 	qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
4984 
4985 	qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
4986 	if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
4987 		qp_attr->sq_draining = 1;
4988 
4989 	qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
4990 	qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
4991 	qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
4992 	qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
4993 	qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
4994 	qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
4995 
4996 	if (MLX5_GET(qpc, qpc, rre))
4997 		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
4998 	if (MLX5_GET(qpc, qpc, rwe))
4999 		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
5000 	if (MLX5_GET(qpc, qpc, rae))
5001 		qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
5002 
5003 	qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
5004 	qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
5005 	qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
5006 	qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
5007 	qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
5008 
5009 	pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
5010 	alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
5011 
5012 	if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
5013 	    qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
5014 		to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
5015 		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
5016 		qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
5017 		qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
5018 	}
5019 
5020 	qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
5021 	qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
5022 	qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
5023 	qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
5024 
5025 out:
5026 	kfree(outb);
5027 	return err;
5028 }
5029 
5030 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
5031 				struct ib_qp_attr *qp_attr, int qp_attr_mask,
5032 				struct ib_qp_init_attr *qp_init_attr)
5033 {
5034 	struct mlx5_core_dct	*dct = &mqp->dct.mdct;
5035 	u32 *out;
5036 	u32 access_flags = 0;
5037 	int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
5038 	void *dctc;
5039 	int err;
5040 	int supported_mask = IB_QP_STATE |
5041 			     IB_QP_ACCESS_FLAGS |
5042 			     IB_QP_PORT |
5043 			     IB_QP_MIN_RNR_TIMER |
5044 			     IB_QP_AV |
5045 			     IB_QP_PATH_MTU |
5046 			     IB_QP_PKEY_INDEX;
5047 
5048 	if (qp_attr_mask & ~supported_mask)
5049 		return -EINVAL;
5050 	if (mqp->state != IB_QPS_RTR)
5051 		return -EINVAL;
5052 
5053 	out = kzalloc(outlen, GFP_KERNEL);
5054 	if (!out)
5055 		return -ENOMEM;
5056 
5057 	err = mlx5_core_dct_query(dev, dct, out, outlen);
5058 	if (err)
5059 		goto out;
5060 
5061 	dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
5062 
5063 	if (qp_attr_mask & IB_QP_STATE)
5064 		qp_attr->qp_state = IB_QPS_RTR;
5065 
5066 	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
5067 		if (MLX5_GET(dctc, dctc, rre))
5068 			access_flags |= IB_ACCESS_REMOTE_READ;
5069 		if (MLX5_GET(dctc, dctc, rwe))
5070 			access_flags |= IB_ACCESS_REMOTE_WRITE;
5071 		if (MLX5_GET(dctc, dctc, rae))
5072 			access_flags |= IB_ACCESS_REMOTE_ATOMIC;
5073 		qp_attr->qp_access_flags = access_flags;
5074 	}
5075 
5076 	if (qp_attr_mask & IB_QP_PORT)
5077 		qp_attr->port_num = MLX5_GET(dctc, dctc, port);
5078 	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
5079 		qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
5080 	if (qp_attr_mask & IB_QP_AV) {
5081 		qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
5082 		qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
5083 		qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
5084 		qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
5085 	}
5086 	if (qp_attr_mask & IB_QP_PATH_MTU)
5087 		qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
5088 	if (qp_attr_mask & IB_QP_PKEY_INDEX)
5089 		qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
5090 out:
5091 	kfree(out);
5092 	return err;
5093 }
5094 
5095 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
5096 		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
5097 {
5098 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
5099 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
5100 	int err = 0;
5101 	u8 raw_packet_qp_state;
5102 
5103 	if (ibqp->rwq_ind_tbl)
5104 		return -ENOSYS;
5105 
5106 	if (qp->type == IB_QPT_GSI)
5107 		return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
5108 					    qp_init_attr);
5109 
5110 	/* Not all of the output fields are applicable; make sure to zero them */
5111 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
5112 	memset(qp_attr, 0, sizeof(*qp_attr));
5113 
5114 	if (unlikely(qp->type == MLX5_IB_QPT_DCT))
5115 		return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
5116 					    qp_attr_mask, qp_init_attr);
5117 
5118 	mutex_lock(&qp->mutex);
5119 
5120 	if (qp->type == IB_QPT_RAW_PACKET ||
5121 	    qp->flags & IB_QP_CREATE_SOURCE_QPN) {
5122 		err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
5123 		if (err)
5124 			goto out;
5125 		qp->state = raw_packet_qp_state;
5126 		qp_attr->port_num = 1;
5127 	} else {
5128 		err = query_qp_attr(dev, qp, qp_attr);
5129 		if (err)
5130 			goto out;
5131 	}
5132 
5133 	qp_attr->qp_state	     = qp->state;
5134 	qp_attr->cur_qp_state	     = qp_attr->qp_state;
5135 	qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
5136 	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
5137 
5138 	if (!ibqp->uobject) {
5139 		qp_attr->cap.max_send_wr  = qp->sq.max_post;
5140 		qp_attr->cap.max_send_sge = qp->sq.max_gs;
5141 		qp_init_attr->qp_context = ibqp->qp_context;
5142 	} else {
5143 		qp_attr->cap.max_send_wr  = 0;
5144 		qp_attr->cap.max_send_sge = 0;
5145 	}
5146 
5147 	qp_init_attr->qp_type = qp->type;
5148 	qp_init_attr->recv_cq = ibqp->recv_cq;
5149 	qp_init_attr->send_cq = ibqp->send_cq;
5150 	qp_init_attr->srq = ibqp->srq;
5151 	qp_attr->cap.max_inline_data = qp->max_inline_data;
5152 
5153 	qp_init_attr->cap	     = qp_attr->cap;
5154 
5155 	qp_init_attr->create_flags = qp->flags;
5156 
5157 	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
5158 		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
5159 
5160 out:
5161 	mutex_unlock(&qp->mutex);
5162 	return err;
5163 }
5164 
5165 int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
5166 {
5167 	struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
5168 	struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
5169 
5170 	if (!MLX5_CAP_GEN(dev->mdev, xrc))
5171 		return -EOPNOTSUPP;
5172 
5173 	return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
5174 }
5175 
5176 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
5177 {
5178 	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
5179 	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
5180 
5181 	return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
5182 }
5183 
5184 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
5185 {
5186 	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
5187 	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
5188 	struct ib_event event;
5189 
5190 	if (rwq->ibwq.event_handler) {
5191 		event.device     = rwq->ibwq.device;
5192 		event.element.wq = &rwq->ibwq;
5193 		switch (type) {
5194 		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
5195 			event.event = IB_EVENT_WQ_FATAL;
5196 			break;
5197 		default:
5198 			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
5199 			return;
5200 		}
5201 
5202 		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
5203 	}
5204 }
5205 
5206 static int set_delay_drop(struct mlx5_ib_dev *dev)
5207 {
5208 	int err = 0;
5209 
5210 	mutex_lock(&dev->delay_drop.lock);
5211 	if (dev->delay_drop.activate)
5212 		goto out;
5213 
5214 	err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
5215 	if (err)
5216 		goto out;
5217 
5218 	dev->delay_drop.activate = true;
5219 out:
5220 	mutex_unlock(&dev->delay_drop.lock);
5221 
5222 	if (!err)
5223 		atomic_inc(&dev->delay_drop.rqs_cnt);
5224 	return err;
5225 }
5226 
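/*
 * Issue the CREATE_RQ command for a user WQ: fill the RQ and WQ contexts
 * (timestamp format, striding parameters, end padding, VLAN stripping,
 * scatter FCS, delay drop) and pass the user buffer pages to firmware.
 */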
5227 static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
5228 		      struct ib_wq_init_attr *init_attr)
5229 {
5230 	struct mlx5_ib_dev *dev;
5231 	int has_net_offloads;
5232 	__be64 *rq_pas0;
5233 	int ts_format;
5234 	void *in;
5235 	void *rqc;
5236 	void *wq;
5237 	int inlen;
5238 	int err;
5239 
5240 	dev = to_mdev(pd->device);
5241 
5242 	ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
5243 	if (ts_format < 0)
5244 		return ts_format;
5245 
5246 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
5247 	in = kvzalloc(inlen, GFP_KERNEL);
5248 	if (!in)
5249 		return -ENOMEM;
5250 
5251 	MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
5252 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
5253 	MLX5_SET(rqc,  rqc, mem_rq_type,
5254 		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
5255 	MLX5_SET(rqc, rqc, ts_format, ts_format);
5256 	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
5257 	MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
5258 	MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
5259 	MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
5260 	wq = MLX5_ADDR_OF(rqc, rqc, wq);
5261 	MLX5_SET(wq, wq, wq_type,
5262 		 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
5263 		 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
5264 	if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
5265 		if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
5266 			mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
5267 			err = -EOPNOTSUPP;
5268 			goto out;
5269 		} else {
5270 			MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
5271 		}
5272 	}
5273 	MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
5274 	if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
5275 		/*
5276 		 * In the firmware, the number of strides in each WQE is
5277 		 *   "512 * 2^single_wqe_log_num_of_strides".
5278 		 * Log values 3 to 8 are accepted by the firmware as 10 to 15,
5279 		 * and 9 to 18 are accepted as 0 to 9.
5280 		 */
5281 		static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
5282 					     2,  3,  4,  5,  6,  7,  8, 9 };
5283 		MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
5284 		MLX5_SET(wq, wq, log_wqe_stride_size,
5285 			 rwq->single_stride_log_num_of_bytes -
5286 			 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
5287 		MLX5_SET(wq, wq, log_wqe_num_of_strides,
5288 			 fw_map[rwq->log_num_strides -
5289 				MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
5290 	}
5291 	MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
5292 	MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
5293 	MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
5294 	MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
5295 	MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
5296 	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
5297 	has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
5298 	if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
5299 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5300 			mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
5301 			err = -EOPNOTSUPP;
5302 			goto out;
5303 		}
5304 	} else {
5305 		MLX5_SET(rqc, rqc, vsd, 1);
5306 	}
5307 	if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
5308 		if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
5309 			mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
5310 			err = -EOPNOTSUPP;
5311 			goto out;
5312 		}
5313 		MLX5_SET(rqc, rqc, scatter_fcs, 1);
5314 	}
5315 	if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
5316 		if (!(dev->ib_dev.attrs.raw_packet_caps &
5317 		      IB_RAW_PACKET_CAP_DELAY_DROP)) {
5318 			mlx5_ib_dbg(dev, "Delay drop is not supported\n");
5319 			err = -EOPNOTSUPP;
5320 			goto out;
5321 		}
5322 		MLX5_SET(rqc, rqc, delay_drop_en, 1);
5323 	}
5324 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
5325 	mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
5326 	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
5327 	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
5328 		err = set_delay_drop(dev);
5329 		if (err) {
5330 			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
5331 				     err);
5332 			mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5333 		} else {
5334 			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
5335 		}
5336 	}
5337 out:
5338 	kvfree(in);
5339 	return err;
5340 }
5341 
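/*
 * Validate the RQ size requested by user space and derive the driver's
 * view of it (buffer size, log stride and log size).
 */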
5342 static int set_user_rq_size(struct mlx5_ib_dev *dev,
5343 			    struct ib_wq_init_attr *wq_init_attr,
5344 			    struct mlx5_ib_create_wq *ucmd,
5345 			    struct mlx5_ib_rwq *rwq)
5346 {
5347 	/* Sanity check RQ size before proceeding */
5348 	if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
5349 		return -EINVAL;
5350 
5351 	if (!ucmd->rq_wqe_count)
5352 		return -EINVAL;
5353 
5354 	rwq->wqe_count = ucmd->rq_wqe_count;
5355 	rwq->wqe_shift = ucmd->rq_wqe_shift;
5356 	if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
5357 		return -EINVAL;
5358 
5359 	rwq->log_rq_stride = rwq->wqe_shift;
5360 	rwq->log_rq_size = ilog2(rwq->wqe_count);
5361 	return 0;
5362 }
5363 
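/*
 * A log_num_strides value below MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES is
 * only valid when the device reports the extended stride range
 * capability (ext_stride_num_range).
 */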
5364 static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
5365 {
5366 	if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
5367 	    (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
5368 		return false;
5369 
5370 	if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
5371 	    (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
5372 		return false;
5373 
5374 	return true;
5375 }
5376 
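/*
 * Parse and validate the user command for WQ creation, including the
 * optional striding-RQ attributes, size the RQ accordingly and then
 * create the user RQ resources via create_user_rq().
 */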
5377 static int prepare_user_rq(struct ib_pd *pd,
5378 			   struct ib_wq_init_attr *init_attr,
5379 			   struct ib_udata *udata,
5380 			   struct mlx5_ib_rwq *rwq)
5381 {
5382 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
5383 	struct mlx5_ib_create_wq ucmd = {};
5384 	int err;
5385 	size_t required_cmd_sz;
5386 
5387 	required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
5388 				      single_stride_log_num_of_bytes);
5389 	if (udata->inlen < required_cmd_sz) {
5390 		mlx5_ib_dbg(dev, "invalid inlen\n");
5391 		return -EINVAL;
5392 	}
5393 
5394 	if (udata->inlen > sizeof(ucmd) &&
5395 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
5396 				 udata->inlen - sizeof(ucmd))) {
5397 		mlx5_ib_dbg(dev, "inlen is not supported\n");
5398 		return -EOPNOTSUPP;
5399 	}
5400 
5401 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
5402 		mlx5_ib_dbg(dev, "copy failed\n");
5403 		return -EFAULT;
5404 	}
5405 
5406 	if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
5407 		mlx5_ib_dbg(dev, "invalid comp mask\n");
5408 		return -EOPNOTSUPP;
5409 	} else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
5410 		if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
5411 			mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
5412 			return -EOPNOTSUPP;
5413 		}
5414 		if ((ucmd.single_stride_log_num_of_bytes <
5415 		    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
5416 		    (ucmd.single_stride_log_num_of_bytes >
5417 		     MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
5418 			mlx5_ib_dbg(dev, "Invalid log stride size (%u). Range is %u - %u\n",
5419 				    ucmd.single_stride_log_num_of_bytes,
5420 				    MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
5421 				    MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
5422 			return -EINVAL;
5423 		}
5424 		if (!log_of_strides_valid(dev,
5425 					  ucmd.single_wqe_log_num_of_strides)) {
5426 			mlx5_ib_dbg(
5427 				dev,
5428 				"Invalid log num strides (%u). Range is %u - %u\n",
5429 				ucmd.single_wqe_log_num_of_strides,
5430 				MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
5431 					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
5432 					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
5433 				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
5434 			return -EINVAL;
5435 		}
5436 		rwq->single_stride_log_num_of_bytes =
5437 			ucmd.single_stride_log_num_of_bytes;
5438 		rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
5439 		rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
5440 		rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
5441 	}
5442 
5443 	err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
5444 	if (err) {
5445 		mlx5_ib_dbg(dev, "err %d\n", err);
5446 		return err;
5447 	}
5448 
5449 	err = create_user_rq(dev, pd, udata, rwq, &ucmd);
5450 	if (err) {
5451 		mlx5_ib_dbg(dev, "err %d\n", err);
5452 		return err;
5453 	}
5454 
5455 	rwq->user_index = ucmd.user_index;
5456 	return 0;
5457 }
5458 
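/*
 * Entry point for ib_create_wq(). Only user (udata) callers and
 * IB_WQT_RQ work queues are supported; the WQ starts in the RESET state
 * and its number is taken from the underlying RQ.
 */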
5459 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
5460 				struct ib_wq_init_attr *init_attr,
5461 				struct ib_udata *udata)
5462 {
5463 	struct mlx5_ib_dev *dev;
5464 	struct mlx5_ib_rwq *rwq;
5465 	struct mlx5_ib_create_wq_resp resp = {};
5466 	size_t min_resp_len;
5467 	int err;
5468 
5469 	if (!udata)
5470 		return ERR_PTR(-ENOSYS);
5471 
5472 	min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
5473 	if (udata->outlen && udata->outlen < min_resp_len)
5474 		return ERR_PTR(-EINVAL);
5475 
5476 	if (!capable(CAP_SYS_RAWIO) &&
5477 	    init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
5478 		return ERR_PTR(-EPERM);
5479 
5480 	dev = to_mdev(pd->device);
5481 	switch (init_attr->wq_type) {
5482 	case IB_WQT_RQ:
5483 		rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
5484 		if (!rwq)
5485 			return ERR_PTR(-ENOMEM);
5486 		err = prepare_user_rq(pd, init_attr, udata, rwq);
5487 		if (err)
5488 			goto err;
5489 		err = create_rq(rwq, pd, init_attr);
5490 		if (err)
5491 			goto err_user_rq;
5492 		break;
5493 	default:
5494 		mlx5_ib_dbg(dev, "unsupported wq type %d\n",
5495 			    init_attr->wq_type);
5496 		return ERR_PTR(-EINVAL);
5497 	}
5498 
5499 	rwq->ibwq.wq_num = rwq->core_qp.qpn;
5500 	rwq->ibwq.state = IB_WQS_RESET;
5501 	if (udata->outlen) {
5502 		resp.response_length = offsetofend(
5503 			struct mlx5_ib_create_wq_resp, response_length);
5504 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
5505 		if (err)
5506 			goto err_copy;
5507 	}
5508 
5509 	rwq->core_qp.event = mlx5_ib_wq_event;
5510 	rwq->ibwq.event_handler = init_attr->event_handler;
5511 	return &rwq->ibwq;
5512 
5513 err_copy:
5514 	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5515 err_user_rq:
5516 	destroy_user_rq(dev, pd, rwq, udata);
5517 err:
5518 	kfree(rwq);
5519 	return ERR_PTR(err);
5520 }
5521 
5522 int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
5523 {
5524 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
5525 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5526 	int ret;
5527 
5528 	ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
5529 	if (ret)
5530 		return ret;
5531 	destroy_user_rq(dev, wq->pd, rwq, udata);
5532 	kfree(rwq);
5533 	return 0;
5534 }
5535 
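/*
 * Create an RQ indirection table (RQT) from the caller-supplied list of
 * work queues. The table holds 1 << log_ind_tbl_size entries and is
 * bounded by the device's log_max_rqt_size capability.
 */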
5536 int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
5537 				 struct ib_rwq_ind_table_init_attr *init_attr,
5538 				 struct ib_udata *udata)
5539 {
5540 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
5541 		to_mrwq_ind_table(ib_rwq_ind_table);
5542 	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
5543 	int sz = 1 << init_attr->log_ind_tbl_size;
5544 	struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
5545 	size_t min_resp_len;
5546 	int inlen;
5547 	int err;
5548 	int i;
5549 	u32 *in;
5550 	void *rqtc;
5551 
5552 	if (udata->inlen > 0 &&
5553 	    !ib_is_udata_cleared(udata, 0,
5554 				 udata->inlen))
5555 		return -EOPNOTSUPP;
5556 
5557 	if (init_attr->log_ind_tbl_size >
5558 	    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
5559 		mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
5560 			    init_attr->log_ind_tbl_size,
5561 			    MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
5562 		return -EINVAL;
5563 	}
5564 
5565 	min_resp_len =
5566 		offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
5567 	if (udata->outlen && udata->outlen < min_resp_len)
5568 		return -EINVAL;
5569 
5570 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
5571 	in = kvzalloc(inlen, GFP_KERNEL);
5572 	if (!in)
5573 		return -ENOMEM;
5574 
5575 	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
5576 
5577 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
5578 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
5579 
5580 	for (i = 0; i < sz; i++)
5581 		MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
5582 
5583 	rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
5584 	MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
5585 
5586 	err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
5587 	kvfree(in);
5588 	if (err)
5589 		return err;
5590 
5591 	rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
5592 	if (udata->outlen) {
5593 		resp.response_length =
5594 			offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
5595 				    response_length);
5596 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
5597 		if (err)
5598 			goto err_copy;
5599 	}
5600 
5601 	return 0;
5602 
5603 err_copy:
5604 	mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5605 	return err;
5606 }
5607 
5608 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
5609 {
5610 	struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
5611 	struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
5612 
5613 	return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
5614 }
5615 
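/*
 * Modify a WQ through the MODIFY_RQ command: translate the IB WQ states
 * to RQ context states, optionally toggle CVLAN stripping, and attach
 * the default counter set when the RQ moves from RESET to READY.
 */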
5616 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
5617 		      u32 wq_attr_mask, struct ib_udata *udata)
5618 {
5619 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
5620 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
5621 	struct mlx5_ib_modify_wq ucmd = {};
5622 	size_t required_cmd_sz;
5623 	int curr_wq_state;
5624 	int wq_state;
5625 	int inlen;
5626 	int err;
5627 	void *rqc;
5628 	void *in;
5629 
5630 	required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
5631 	if (udata->inlen < required_cmd_sz)
5632 		return -EINVAL;
5633 
5634 	if (udata->inlen > sizeof(ucmd) &&
5635 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
5636 				 udata->inlen - sizeof(ucmd)))
5637 		return -EOPNOTSUPP;
5638 
5639 	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
5640 		return -EFAULT;
5641 
5642 	if (ucmd.comp_mask || ucmd.reserved)
5643 		return -EOPNOTSUPP;
5644 
5645 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
5646 	in = kvzalloc(inlen, GFP_KERNEL);
5647 	if (!in)
5648 		return -ENOMEM;
5649 
5650 	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
5651 
5652 	curr_wq_state = wq_attr->curr_wq_state;
5653 	wq_state = wq_attr->wq_state;
5654 	if (curr_wq_state == IB_WQS_ERR)
5655 		curr_wq_state = MLX5_RQC_STATE_ERR;
5656 	if (wq_state == IB_WQS_ERR)
5657 		wq_state = MLX5_RQC_STATE_ERR;
5658 	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
5659 	MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
5660 	MLX5_SET(rqc, rqc, state, wq_state);
5661 
5662 	if (wq_attr_mask & IB_WQ_FLAGS) {
5663 		if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
5664 			if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
5665 			      MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
5666 				mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
5667 				err = -EOPNOTSUPP;
5668 				goto out;
5669 			}
5670 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
5671 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
5672 			MLX5_SET(rqc, rqc, vsd,
5673 				 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
5674 		}
5675 
5676 		if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
5677 			mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
5678 			err = -EOPNOTSUPP;
5679 			goto out;
5680 		}
5681 	}
5682 
5683 	if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
5684 		u16 set_id;
5685 
5686 		set_id = mlx5_ib_get_counters_id(dev, 0);
5687 		if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
5688 			MLX5_SET64(modify_rq_in, in, modify_bitmask,
5689 				   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
5690 			MLX5_SET(rqc, rqc, counter_set_id, set_id);
5691 		} else
5692 			dev_info_once(
5693 				&dev->ib_dev.dev,
5694 				"Receive WQ counters are not supported on current FW\n");
5695 	}
5696 
5697 	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
5698 	if (!err)
5699 		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
5700 
5701 out:
5702 	kvfree(in);
5703 	return err;
5704 }
5705 
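/*
 * QP drain support: a marker WR carrying a dedicated CQE is posted on the
 * queue being drained, and its completion (mlx5_ib_drain_qp_done) signals
 * that all previously posted WRs have been flushed.
 */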
5706 struct mlx5_ib_drain_cqe {
5707 	struct ib_cqe cqe;
5708 	struct completion done;
5709 };
5710 
5711 static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
5712 {
5713 	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
5714 						     struct mlx5_ib_drain_cqe,
5715 						     cqe);
5716 
5717 	complete(&cqe->done);
5718 }
5719 
5720 /* This function returns only once the drain WR has completed */
5721 static void handle_drain_completion(struct ib_cq *cq,
5722 				    struct mlx5_ib_drain_cqe *sdrain,
5723 				    struct mlx5_ib_dev *dev)
5724 {
5725 	struct mlx5_core_dev *mdev = dev->mdev;
5726 
5727 	if (cq->poll_ctx == IB_POLL_DIRECT) {
5728 		while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
5729 			ib_process_cq_direct(cq, -1);
5730 		return;
5731 	}
5732 
5733 	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5734 		struct mlx5_ib_cq *mcq = to_mcq(cq);
5735 		bool triggered = false;
5736 		unsigned long flags;
5737 
5738 		spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
5739 		/* Make sure that the CQ handler won't run if it hasn't run yet */
5740 		if (!mcq->mcq.reset_notify_added)
5741 			mcq->mcq.reset_notify_added = 1;
5742 		else
5743 			triggered = true;
5744 		spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
5745 
5746 		if (triggered) {
5747 			/* Wait for any scheduled/running task to be ended */
5748 			switch (cq->poll_ctx) {
5749 			case IB_POLL_SOFTIRQ:
5750 				irq_poll_disable(&cq->iop);
5751 				irq_poll_enable(&cq->iop);
5752 				break;
5753 			case IB_POLL_WORKQUEUE:
5754 				cancel_work_sync(&cq->work);
5755 				break;
5756 			default:
5757 				WARN_ON_ONCE(1);
5758 			}
5759 		}
5760 
5761 		/* Run the CQ handler - this makes sure that the drain WR will
5762 		 * be processed if it wasn't processed yet.
5763 		 */
5764 		mcq->mcq.comp(&mcq->mcq, NULL);
5765 	}
5766 
5767 	wait_for_completion(&sdrain->done);
5768 }
5769 
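/*
 * Drain the send queue: move the QP to the error state, post a marker
 * RDMA_WRITE WR and wait for its completion. A modify-QP failure is
 * tolerated only while the device is in internal error state.
 */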
5770 void mlx5_ib_drain_sq(struct ib_qp *qp)
5771 {
5772 	struct ib_cq *cq = qp->send_cq;
5773 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5774 	struct mlx5_ib_drain_cqe sdrain;
5775 	const struct ib_send_wr *bad_swr;
5776 	struct ib_rdma_wr swr = {
5777 		.wr = {
5778 			.next = NULL,
5779 			{ .wr_cqe	= &sdrain.cqe, },
5780 			.opcode	= IB_WR_RDMA_WRITE,
5781 		},
5782 	};
5783 	int ret;
5784 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
5785 	struct mlx5_core_dev *mdev = dev->mdev;
5786 
5787 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5788 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5789 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
5790 		return;
5791 	}
5792 
5793 	sdrain.cqe.done = mlx5_ib_drain_qp_done;
5794 	init_completion(&sdrain.done);
5795 
5796 	ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
5797 	if (ret) {
5798 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
5799 		return;
5800 	}
5801 
5802 	handle_drain_completion(cq, &sdrain, dev);
5803 }
5804 
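/*
 * Drain the receive queue: same scheme as mlx5_ib_drain_sq(), using an
 * empty receive WR as the drain marker.
 */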
5805 void mlx5_ib_drain_rq(struct ib_qp *qp)
5806 {
5807 	struct ib_cq *cq = qp->recv_cq;
5808 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
5809 	struct mlx5_ib_drain_cqe rdrain;
5810 	struct ib_recv_wr rwr = {};
5811 	const struct ib_recv_wr *bad_rwr;
5812 	int ret;
5813 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
5814 	struct mlx5_core_dev *mdev = dev->mdev;
5815 
5816 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
5817 	if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
5818 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
5819 		return;
5820 	}
5821 
5822 	rwr.wr_cqe = &rdrain.cqe;
5823 	rdrain.cqe.done = mlx5_ib_drain_qp_done;
5824 	init_completion(&rdrain.done);
5825 
5826 	ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
5827 	if (ret) {
5828 		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
5829 		return;
5830 	}
5831 
5832 	handle_drain_completion(cq, &rdrain, dev);
5833 }
5834 
5835 /*
5836  * Bind a QP to a counter. If @counter is NULL then bind the QP to
5837  * the default counter.
5838  */
5839 int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
5840 {
5841 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
5842 	struct mlx5_ib_qp *mqp = to_mqp(qp);
5843 	int err = 0;
5844 
5845 	mutex_lock(&mqp->mutex);
5846 	if (mqp->state == IB_QPS_RESET) {
5847 		qp->counter = counter;
5848 		goto out;
5849 	}
5850 
5851 	if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
5852 		err = -EOPNOTSUPP;
5853 		goto out;
5854 	}
5855 
5856 	if (mqp->state == IB_QPS_RTS) {
5857 		err = __mlx5_ib_qp_set_counter(qp, counter);
5858 		if (!err)
5859 			qp->counter = counter;
5860 
5861 		goto out;
5862 	}
5863 
5864 	mqp->counter_pending = 1;
5865 	qp->counter = counter;
5866 
5867 out:
5868 	mutex_unlock(&mqp->mutex);
5869 	return err;
5870 }
5871 
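/*
 * QP async events are handled on an ordered workqueue that is allocated
 * here at module init and destroyed in mlx5_ib_qp_event_cleanup().
 */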
5872 int mlx5_ib_qp_event_init(void)
5873 {
5874 	mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
5875 	if (!mlx5_ib_qp_event_wq)
5876 		return -ENOMEM;
5877 
5878 	return 0;
5879 }
5880 
5881 void mlx5_ib_qp_event_cleanup(void)
5882 {
5883 	destroy_workqueue(mlx5_ib_qp_event_wq);
5884 }
5885