xref: /linux/drivers/infiniband/hw/hns/hns_roce_qp.c (revision 8818ffb04bfa168dfe5056cd24cee5211dcc4b3c)
1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <rdma/ib_addr.h>
35 #include <rdma/ib_umem.h>
36 #include <rdma/uverbs_ioctl.h>
37 #include "hns_roce_common.h"
38 #include "hns_roce_device.h"
39 #include "hns_roce_hem.h"
40 
41 static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev,
42 					      u32 qpn)
43 {
44 	struct device *dev = hr_dev->dev;
45 	struct hns_roce_qp *qp;
46 	unsigned long flags;
47 
48 	xa_lock_irqsave(&hr_dev->qp_table_xa, flags);
49 	qp = __hns_roce_qp_lookup(hr_dev, qpn);
50 	if (qp)
51 		refcount_inc(&qp->refcount);
52 	xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags);
53 
54 	if (!qp)
55 		dev_warn(dev, "async event for bogus QP %08x\n", qpn);
56 
57 	return qp;
58 }
59 
60 static void flush_work_handle(struct work_struct *work)
61 {
62 	struct hns_roce_work *flush_work = container_of(work,
63 					struct hns_roce_work, work);
64 	struct hns_roce_qp *hr_qp = container_of(flush_work,
65 					struct hns_roce_qp, flush_work);
66 	struct device *dev = flush_work->hr_dev->dev;
67 	struct ib_qp_attr attr;
68 	int attr_mask;
69 	int ret;
70 
71 	attr_mask = IB_QP_STATE;
72 	attr.qp_state = IB_QPS_ERR;
73 
74 	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
75 		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
76 		if (ret)
77 			dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
78 				ret);
79 	}
80 
81 	/*
82 	 * make sure we signal QP destroy leg that flush QP was completed
83 	 * so that it can safely proceed ahead now and destroy QP
84 	 */
85 	if (refcount_dec_and_test(&hr_qp->refcount))
86 		complete(&hr_qp->free);
87 }
88 
89 void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
90 {
91 	struct hns_roce_work *flush_work = &hr_qp->flush_work;
92 	unsigned long flags;
93 
94 	spin_lock_irqsave(&hr_qp->flush_lock, flags);
95 	/* Exit directly after destroy_qp() */
96 	if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) {
97 		spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
98 		return;
99 	}
100 
101 	refcount_inc(&hr_qp->refcount);
102 	queue_work(hr_dev->irq_workq, &flush_work->work);
103 	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
104 }
105 
106 void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
107 {
108 	/*
109 	 * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
110 	 * gets into errored mode. Hence, as a workaround to this
111 	 * hardware limitation, driver needs to assist in flushing. But
112 	 * the flushing operation uses mailbox to convey the QP state to
113 	 * the hardware and which can sleep due to the mutex protection
114 	 * around the mailbox calls. Hence, use the deferred flush for
115 	 * now.
116 	 */
117 	if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
118 		init_flush_work(dev, qp);
119 }
120 
121 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
122 {
123 	struct hns_roce_qp *qp;
124 
125 	qp = hns_roce_qp_lookup(hr_dev, qpn);
126 	if (!qp)
127 		return;
128 
129 	qp->event(qp, (enum hns_roce_event)event_type);
130 
131 	if (refcount_dec_and_test(&qp->refcount))
132 		complete(&qp->free);
133 }
134 
135 void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn)
136 {
137 	struct hns_roce_qp *qp;
138 
139 	qp = hns_roce_qp_lookup(hr_dev, qpn);
140 	if (!qp)
141 		return;
142 
143 	qp->state = IB_QPS_ERR;
144 	flush_cqe(hr_dev, qp);
145 
146 	if (refcount_dec_and_test(&qp->refcount))
147 		complete(&qp->free);
148 }
149 
150 static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
151 				 enum hns_roce_event type)
152 {
153 	struct ib_qp *ibqp = &hr_qp->ibqp;
154 	struct ib_event event;
155 
156 	if (ibqp->event_handler) {
157 		event.device = ibqp->device;
158 		event.element.qp = ibqp;
159 		switch (type) {
160 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
161 			event.event = IB_EVENT_PATH_MIG;
162 			break;
163 		case HNS_ROCE_EVENT_TYPE_COMM_EST:
164 			event.event = IB_EVENT_COMM_EST;
165 			break;
166 		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
167 			event.event = IB_EVENT_SQ_DRAINED;
168 			break;
169 		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
170 			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
171 			break;
172 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
173 			event.event = IB_EVENT_QP_FATAL;
174 			break;
175 		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
176 			event.event = IB_EVENT_PATH_MIG_ERR;
177 			break;
178 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
179 			event.event = IB_EVENT_QP_REQ_ERR;
180 			break;
181 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
182 		case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
183 		case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
184 			event.event = IB_EVENT_QP_ACCESS_ERR;
185 			break;
186 		default:
187 			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
188 				type, hr_qp->qpn);
189 			return;
190 		}
191 		ibqp->event_handler(&event, ibqp->qp_context);
192 	}
193 }
194 
/* Map a QP bank id to its affine CQ bank id: the QP bank id is halved
 * and masked into the CQ bank id space (used by select_qp_bankid() to
 * keep a QP's bank aligned with its send CQ's bank).
 */
static u8 get_affinity_cq_bank(u8 qp_bank)
{
	return (qp_bank >> 1) & CQ_BANKID_MASK;
}
199 
200 static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank, u8 valid_qp_bank_mask)
201 {
202 #define INVALID_LOAD_QPNUM 0xFFFFFFFF
203 	u32 least_load = INVALID_LOAD_QPNUM;
204 	u8 bankid = 0;
205 	u32 bankcnt;
206 	u8 i;
207 
208 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
209 		if (!(valid_qp_bank_mask & BIT(i)))
210 			continue;
211 
212 		bankcnt = bank[i].inuse;
213 		if (bankcnt < least_load) {
214 			least_load = bankcnt;
215 			bankid = i;
216 		}
217 	}
218 
219 	return bankid;
220 }
221 
222 static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
223 				 unsigned long *qpn)
224 {
225 	int id;
226 
227 	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
228 	if (id < 0) {
229 		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
230 				     GFP_KERNEL);
231 		if (id < 0)
232 			return id;
233 	}
234 
235 	/* the QPN should keep increasing until the max value is reached. */
236 	bank->next = (id + 1) > bank->max ? bank->min : id + 1;
237 
238 	/* the lower 3 bits is bankid */
239 	*qpn = (id << 3) | bankid;
240 
241 	return 0;
242 }
243 
244 static bool use_ext_sge(struct ib_qp_init_attr *init_attr)
245 {
246 	return init_attr->cap.max_send_sge > HNS_ROCE_SGE_IN_WQE ||
247 		init_attr->qp_type == IB_QPT_UD ||
248 		init_attr->qp_type == IB_QPT_GSI;
249 }
250 
251 static u8 select_qp_bankid(struct hns_roce_dev *hr_dev,
252 			   struct ib_qp_init_attr *init_attr)
253 {
254 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
255 	struct hns_roce_bank *bank = qp_table->bank;
256 	struct ib_cq *scq = init_attr->send_cq;
257 	u8 valid_qp_bank_mask = 0;
258 	unsigned long cqn = 0;
259 	u8 i;
260 
261 	if (scq)
262 		cqn = to_hr_cq(scq)->cqn;
263 
264 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
265 		if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
266 			continue;
267 
268 		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_LIMIT_BANK) &&
269 		    use_ext_sge(init_attr) &&
270 		    !(VALID_EXT_SGE_QP_BANK_MASK_LIMIT & BIT(i)))
271 			continue;
272 
273 		valid_qp_bank_mask |= BIT(i);
274 	}
275 
276 	return get_least_load_bankid_for_qp(bank, valid_qp_bank_mask);
277 }
278 
279 static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
280 		     struct ib_qp_init_attr *init_attr)
281 {
282 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
283 	unsigned long num = 0;
284 	u8 bankid;
285 	int ret;
286 
287 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
288 		num = 1;
289 	} else {
290 		mutex_lock(&qp_table->bank_mutex);
291 		bankid = select_qp_bankid(hr_dev, init_attr);
292 		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
293 					    &num);
294 		if (ret) {
295 			ibdev_err(&hr_dev->ib_dev,
296 				  "failed to alloc QPN, ret = %d\n", ret);
297 			mutex_unlock(&qp_table->bank_mutex);
298 			return ret;
299 		}
300 
301 		qp_table->bank[bankid].inuse++;
302 		mutex_unlock(&qp_table->bank_mutex);
303 	}
304 
305 	hr_qp->qpn = num;
306 
307 	return 0;
308 }
309 
310 static void add_qp_to_list(struct hns_roce_dev *hr_dev,
311 			   struct hns_roce_qp *hr_qp,
312 			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
313 {
314 	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
315 	unsigned long flags;
316 
317 	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
318 	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;
319 
320 	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
321 	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);
322 
323 	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
324 	if (hr_send_cq)
325 		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
326 	if (hr_recv_cq)
327 		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);
328 
329 	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
330 	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
331 }
332 
333 static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
334 			     struct hns_roce_qp *hr_qp,
335 			     struct ib_qp_init_attr *init_attr)
336 {
337 	struct xarray *xa = &hr_dev->qp_table_xa;
338 	int ret;
339 
340 	if (!hr_qp->qpn)
341 		return -EINVAL;
342 
343 	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
344 	if (ret)
345 		dev_err(hr_dev->dev, "failed to xa store for QPC\n");
346 	else
347 		/* add QP to device's QP list for softwc */
348 		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
349 			       init_attr->recv_cq);
350 
351 	return ret;
352 }
353 
/*
 * Acquire all per-QPN context table entries this QP needs: QPC and IRRL
 * always; TRRL and SCC CTX only when the device supports them. On any
 * failure, entries acquired so far are released in reverse order.
 *
 * Returns 0 on success or the negative errno from hns_roce_table_get().
 */
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	/* a zero QPN is treated as invalid here */
	if (!hr_qp->qpn)
		return -EINVAL;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "failed to get IRRL table\n");
		goto err_put_qp;
	}

	/* TRRL support is indicated by a non-zero entry size */
	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}
412 
/* Remove the direct-WQE mmap entry created by qp_mmap_entry(). */
static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
{
	rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
}
417 
418 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
419 {
420 	struct xarray *xa = &hr_dev->qp_table_xa;
421 	unsigned long flags;
422 
423 	list_del(&hr_qp->node);
424 
425 	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
426 		list_del(&hr_qp->sq_node);
427 
428 	if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
429 	    hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
430 		list_del(&hr_qp->rq_node);
431 
432 	xa_lock_irqsave(xa, flags);
433 	__xa_erase(xa, hr_qp->qpn);
434 	xa_unlock_irqrestore(xa, flags);
435 }
436 
/*
 * Release per-QPN context table entries. Only TRRL and IRRL are put
 * here, while alloc_qpc() also acquires QPC and SCC CTX entries —
 * NOTE(review): presumably those are retained on purpose; confirm this
 * asymmetry with alloc_qpc() is intentional.
 */
static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}
445 
/* Recover the bank id that alloc_qpn_with_bankid() encoded into the
 * low 3 bits of the QPN.
 */
static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}
451 
452 static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
453 {
454 	u8 bankid;
455 
456 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
457 		return;
458 
459 	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
460 		return;
461 
462 	bankid = get_qp_bankid(hr_qp->qpn);
463 
464 	ida_free(&hr_dev->qp_table.bank[bankid].ida,
465 		 hr_qp->qpn / HNS_ROCE_QP_BANK_NUM);
466 
467 	mutex_lock(&hr_dev->qp_table.bank_mutex);
468 	hr_dev->qp_table.bank[bankid].inuse--;
469 	mutex_unlock(&hr_dev->qp_table.bank_mutex);
470 }
471 
472 static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp,
473 		       bool user)
474 {
475 	u32 max_sge = dev->caps.max_rq_sg;
476 
477 	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
478 		return max_sge;
479 
480 	/* Reserve SGEs only for HIP08 in kernel; The userspace driver will
481 	 * calculate number of max_sge with reserved SGEs when allocating wqe
482 	 * buf, so there is no need to do this again in kernel. But the number
483 	 * may exceed the capacity of SGEs recorded in the firmware, so the
484 	 * kernel driver should just adapt the value accordingly.
485 	 */
486 	if (user)
487 		max_sge = roundup_pow_of_two(max_sge + 1);
488 	else
489 		hr_qp->rq.rsv_sge = 1;
490 
491 	return max_sge;
492 }
493 
494 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
495 		       struct hns_roce_qp *hr_qp, int has_rq, bool user)
496 {
497 	u32 max_sge = proc_rq_sge(hr_dev, hr_qp, user);
498 	u32 cnt;
499 
500 	/* If srq exist, set zero for relative number of rq */
501 	if (!has_rq) {
502 		hr_qp->rq.wqe_cnt = 0;
503 		hr_qp->rq.max_gs = 0;
504 		cap->max_recv_wr = 0;
505 		cap->max_recv_sge = 0;
506 
507 		return 0;
508 	}
509 
510 	/* Check the validity of QP support capacity */
511 	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
512 	    cap->max_recv_sge > max_sge) {
513 		ibdev_err(&hr_dev->ib_dev,
514 			  "RQ config error, depth = %u, sge = %u\n",
515 			  cap->max_recv_wr, cap->max_recv_sge);
516 		return -EINVAL;
517 	}
518 
519 	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
520 	if (cnt > hr_dev->caps.max_wqes) {
521 		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
522 			  cap->max_recv_wr);
523 		return -EINVAL;
524 	}
525 
526 	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
527 					      hr_qp->rq.rsv_sge);
528 
529 	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
530 				    hr_qp->rq.max_gs);
531 
532 	hr_qp->rq.wqe_cnt = cnt;
533 
534 	cap->max_recv_wr = cnt;
535 	cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
536 
537 	return 0;
538 }
539 
540 static u32 get_max_inline_data(struct hns_roce_dev *hr_dev,
541 			       struct ib_qp_cap *cap)
542 {
543 	if (cap->max_inline_data) {
544 		cap->max_inline_data = roundup_pow_of_two(cap->max_inline_data);
545 		return min(cap->max_inline_data,
546 			   hr_dev->caps.max_sq_inline);
547 	}
548 
549 	return 0;
550 }
551 
552 static void update_inline_data(struct hns_roce_qp *hr_qp,
553 			       struct ib_qp_cap *cap)
554 {
555 	u32 sge_num = hr_qp->sq.ext_sge_cnt;
556 
557 	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
558 		if (!(hr_qp->ibqp.qp_type == IB_QPT_GSI ||
559 		      hr_qp->ibqp.qp_type == IB_QPT_UD))
560 			sge_num = max((u32)HNS_ROCE_SGE_IN_WQE, sge_num);
561 
562 		cap->max_inline_data = max(cap->max_inline_data,
563 					   sge_num * HNS_ROCE_SGE_SIZE);
564 	}
565 
566 	hr_qp->max_inline_data = cap->max_inline_data;
567 }
568 
569 static u32 get_sge_num_from_max_send_sge(bool is_ud_or_gsi,
570 					 u32 max_send_sge)
571 {
572 	unsigned int std_sge_num;
573 	unsigned int min_sge;
574 
575 	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
576 	min_sge = is_ud_or_gsi ? 1 : 0;
577 	return max_send_sge > std_sge_num ? (max_send_sge - std_sge_num) :
578 				min_sge;
579 }
580 
581 static unsigned int get_sge_num_from_max_inl_data(bool is_ud_or_gsi,
582 						  u32 max_inline_data)
583 {
584 	unsigned int inline_sge;
585 
586 	if (!max_inline_data)
587 		return 0;
588 
589 	/*
590 	 * if max_inline_data less than
591 	 * HNS_ROCE_SGE_IN_WQE * HNS_ROCE_SGE_SIZE,
592 	 * In addition to ud's mode, no need to extend sge.
593 	 */
594 	inline_sge = roundup_pow_of_two(max_inline_data) / HNS_ROCE_SGE_SIZE;
595 	if (!is_ud_or_gsi && inline_sge <= HNS_ROCE_SGE_IN_WQE)
596 		inline_sge = 0;
597 
598 	return inline_sge;
599 }
600 
/*
 * Size the SQ's extended SGE space: derive the ext-SGE count from both
 * the requested max_send_sge and (in ext-SGE mode) the inline-data
 * request, clamp max_gs to the device limit, and finally feed the
 * resulting SGE capacity back into cap->max_inline_data via
 * update_inline_data().
 */
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	bool is_ud_or_gsi = (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
				hr_qp->ibqp.qp_type == IB_QPT_UD);
	unsigned int std_sge_num;
	u32 inline_ext_sge = 0;
	u32 ext_wqe_sge_cnt;
	u32 total_sge_cnt;

	/* may round the request up and clamp it to the device limit */
	cap->max_inline_data = get_max_inline_data(hr_dev, cap);

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	/* UD/GSI WQEs hold no standard SGEs; everything is extended */
	std_sge_num = is_ud_or_gsi ? 0 : HNS_ROCE_SGE_IN_WQE;
	ext_wqe_sge_cnt = get_sge_num_from_max_send_sge(is_ud_or_gsi,
							cap->max_send_sge);

	if (hr_qp->config & HNS_ROCE_EXSGE_FLAGS) {
		/* the ext-SGE space must also be able to hold the inline
		 * data, whichever of the two demands is larger
		 */
		inline_ext_sge = max(ext_wqe_sge_cnt,
				     get_sge_num_from_max_inl_data(is_ud_or_gsi,
							 cap->max_inline_data));
		hr_qp->sq.ext_sge_cnt = inline_ext_sge ?
					roundup_pow_of_two(inline_ext_sge) : 0;

		hr_qp->sq.max_gs = max(1U, (hr_qp->sq.ext_sge_cnt + std_sge_num));
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);

		ext_wqe_sge_cnt = hr_qp->sq.ext_sge_cnt;
	} else {
		hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
		hr_qp->sq.max_gs = min(hr_qp->sq.max_gs, hr_dev->caps.max_sq_sg);
		hr_qp->sq.ext_sge_cnt = hr_qp->sq.max_gs;
	}

	/* If the number of extended sge is not zero, they MUST use the
	 * space of HNS_HW_PAGE_SIZE at least.
	 */
	if (ext_wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * ext_wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
				(u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}

	update_inline_data(hr_qp, cap);
}
646 
647 static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
648 					struct ib_qp_cap *cap,
649 					struct hns_roce_ib_create_qp *ucmd)
650 {
651 	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
652 	u8 max_sq_stride = ilog2(roundup_sq_stride);
653 
654 	/* Sanity check SQ size before proceeding */
655 	if (ucmd->log_sq_stride > max_sq_stride ||
656 	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
657 		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
658 		return -EINVAL;
659 	}
660 
661 	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
662 		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
663 			  cap->max_send_sge);
664 		return -EINVAL;
665 	}
666 
667 	return 0;
668 }
669 
670 static int set_user_sq_size(struct hns_roce_dev *hr_dev,
671 			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
672 			    struct hns_roce_ib_create_qp *ucmd)
673 {
674 	struct ib_device *ibdev = &hr_dev->ib_dev;
675 	u32 cnt = 0;
676 	int ret;
677 
678 	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
679 	    cnt > hr_dev->caps.max_wqes)
680 		return -EINVAL;
681 
682 	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
683 	if (ret) {
684 		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
685 			  ret);
686 		return ret;
687 	}
688 
689 	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
690 
691 	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
692 	hr_qp->sq.wqe_cnt = cnt;
693 
694 	return 0;
695 }
696 
697 static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
698 			    struct hns_roce_qp *hr_qp,
699 			    struct hns_roce_buf_attr *buf_attr)
700 {
701 	int buf_size;
702 	int idx = 0;
703 
704 	hr_qp->buff_size = 0;
705 
706 	/* SQ WQE */
707 	hr_qp->sq.offset = 0;
708 	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
709 					  hr_qp->sq.wqe_shift);
710 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
711 		buf_attr->region[idx].size = buf_size;
712 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
713 		idx++;
714 		hr_qp->buff_size += buf_size;
715 	}
716 
717 	/* extend SGE WQE in SQ */
718 	hr_qp->sge.offset = hr_qp->buff_size;
719 	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
720 					  hr_qp->sge.sge_shift);
721 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
722 		buf_attr->region[idx].size = buf_size;
723 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
724 		idx++;
725 		hr_qp->buff_size += buf_size;
726 	}
727 
728 	/* RQ WQE */
729 	hr_qp->rq.offset = hr_qp->buff_size;
730 	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
731 					  hr_qp->rq.wqe_shift);
732 	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
733 		buf_attr->region[idx].size = buf_size;
734 		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
735 		idx++;
736 		hr_qp->buff_size += buf_size;
737 	}
738 
739 	if (hr_qp->buff_size < 1)
740 		return -EINVAL;
741 
742 	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
743 	buf_attr->region_count = idx;
744 
745 	return 0;
746 }
747 
748 static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
749 			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
750 {
751 	struct ib_device *ibdev = &hr_dev->ib_dev;
752 	u32 cnt;
753 
754 	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
755 	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
756 		ibdev_err(ibdev, "failed to check SQ WR or SGE num.\n");
757 		return -EINVAL;
758 	}
759 
760 	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
761 	if (cnt > hr_dev->caps.max_wqes) {
762 		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
763 			  cnt);
764 		return -EINVAL;
765 	}
766 
767 	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
768 	hr_qp->sq.wqe_cnt = cnt;
769 
770 	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);
771 
772 	/* sync the parameters of kernel QP to user's configuration */
773 	cap->max_send_wr = cnt;
774 
775 	return 0;
776 }
777 
778 static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
779 {
780 	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
781 		return 0;
782 
783 	return 1;
784 }
785 
786 static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
787 {
788 	if (attr->qp_type == IB_QPT_XRC_INI ||
789 	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
790 	    !attr->cap.max_recv_wr)
791 		return 0;
792 
793 	return 1;
794 }
795 
796 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
797 			struct ib_qp_init_attr *init_attr,
798 			struct ib_udata *udata, unsigned long addr)
799 {
800 	struct ib_device *ibdev = &hr_dev->ib_dev;
801 	struct hns_roce_buf_attr buf_attr = {};
802 	int ret;
803 
804 	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
805 	if (ret) {
806 		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
807 		goto err_inline;
808 	}
809 	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
810 				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
811 				  udata, addr);
812 	if (ret) {
813 		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
814 		goto err_inline;
815 	}
816 
817 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
818 		hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
819 
820 	return 0;
821 
822 err_inline:
823 
824 	return ret;
825 }
826 
/* Tear down the MTR that maps the QP's WQE buffer (see alloc_qp_buf()). */
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
}
831 
832 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
833 				   struct ib_qp_init_attr *init_attr,
834 				   struct ib_udata *udata,
835 				   struct hns_roce_ib_create_qp_resp *resp,
836 				   struct hns_roce_ib_create_qp *ucmd)
837 {
838 	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
839 		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
840 		hns_roce_qp_has_sq(init_attr) &&
841 		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
842 }
843 
844 static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
845 				   struct ib_qp_init_attr *init_attr,
846 				   struct ib_udata *udata,
847 				   struct hns_roce_ib_create_qp_resp *resp)
848 {
849 	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
850 		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
851 		hns_roce_qp_has_rq(init_attr));
852 }
853 
854 static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
855 				     struct ib_qp_init_attr *init_attr)
856 {
857 	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
858 		hns_roce_qp_has_rq(init_attr));
859 }
860 
861 static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
862 			 struct hns_roce_dev *hr_dev,
863 			 struct ib_udata *udata,
864 			 struct hns_roce_ib_create_qp_resp *resp)
865 {
866 	struct hns_roce_ucontext *uctx =
867 		rdma_udata_to_drv_context(udata,
868 			struct hns_roce_ucontext, ibucontext);
869 	struct rdma_user_mmap_entry *rdma_entry;
870 	u64 address;
871 
872 	address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
873 
874 	hr_qp->dwqe_mmap_entry =
875 		hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
876 						HNS_ROCE_DWQE_SIZE,
877 						HNS_ROCE_MMAP_TYPE_DWQE);
878 
879 	if (!hr_qp->dwqe_mmap_entry) {
880 		ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
881 		return -ENOMEM;
882 	}
883 
884 	rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
885 	resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
886 
887 	return 0;
888 }
889 
/*
 * Map the record doorbells a userspace QP asked for: the SQ doorbell
 * at ucmd->sdb_addr and/or the RQ doorbell at ucmd->db_addr. The
 * corresponding HNS_ROCE_QP_CAP_*_RECORD_DB flags are set on success;
 * on RQ-doorbell failure the already-mapped SQ doorbell is unmapped.
 */
static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (has_sdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user SQ doorbell, ret = %d.\n",
				  ret);
			goto err_out;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (has_rdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to map user RQ doorbell, ret = %d.\n",
				  ret);
			goto err_sdb;
		}
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;

err_sdb:
	if (has_sdb)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}
934 
/*
 * Set up the doorbell registers of a kernel QP. On HIP09+ the SQ
 * doorbell is the QP's per-QPN slot in the device's direct WQE window;
 * on older revisions it is the legacy SDB register scaled by the
 * privileged UAR index. The RQ always uses the legacy ODB register,
 * optionally augmented with a record doorbell in memory.
 */
static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_init_attr *init_attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		hr_qp->sq.db_reg = hr_dev->mem_base +
				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
	else
		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
				   DB_REG_OFFSET * hr_dev->priv_uar.index;

	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
			   DB_REG_OFFSET * hr_dev->priv_uar.index;

	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
				  ret);
			return ret;
		}
		/* start from a clean doorbell record */
		*hr_qp->rdb.db_record = 0;
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
	}

	return 0;
}
966 
/*
 * Allocate all doorbell resources for a QP: the direct WQE mmap entry
 * (user QPs with DWQE enabled), then the user- or kernel-side record
 * doorbells. On a user doorbell failure the mmap entry is removed
 * again so the caller sees an all-or-nothing result.
 */
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		/* DWQE was enabled by alloc_qp_buf() when supported */
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
			ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
			if (ret)
				return ret;
		}

		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
				       resp);
		if (ret)
			goto err_remove_qp;
	} else {
		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
		if (ret)
			return ret;
	}

	return 0;

err_remove_qp:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
		qp_user_mmap_entry_remove(hr_qp);

	return ret;
}
1003 
1004 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1005 		       struct ib_udata *udata)
1006 {
1007 	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
1008 		udata, struct hns_roce_ucontext, ibucontext);
1009 
1010 	if (udata) {
1011 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
1012 			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
1013 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
1014 			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
1015 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
1016 			qp_user_mmap_entry_remove(hr_qp);
1017 	} else {
1018 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
1019 			hns_roce_free_db(hr_dev, &hr_qp->rdb);
1020 	}
1021 }
1022 
1023 static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
1024 			     struct hns_roce_qp *hr_qp)
1025 {
1026 	struct ib_device *ibdev = &hr_dev->ib_dev;
1027 	u64 *sq_wrid = NULL;
1028 	u64 *rq_wrid = NULL;
1029 	int ret;
1030 
1031 	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
1032 	if (!sq_wrid) {
1033 		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
1034 		return -ENOMEM;
1035 	}
1036 
1037 	if (hr_qp->rq.wqe_cnt) {
1038 		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
1039 		if (!rq_wrid) {
1040 			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
1041 			ret = -ENOMEM;
1042 			goto err_sq;
1043 		}
1044 	}
1045 
1046 	hr_qp->sq.wrid = sq_wrid;
1047 	hr_qp->rq.wrid = rq_wrid;
1048 	return 0;
1049 err_sq:
1050 	kfree(sq_wrid);
1051 
1052 	return ret;
1053 }
1054 
/* Release the wrid arrays allocated by alloc_kernel_wrid(). */
static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	/* rq.wrid may be NULL for an RQ-less QP; kfree(NULL) is a no-op. */
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}
1060 
1061 static void default_congest_type(struct hns_roce_dev *hr_dev,
1062 				 struct hns_roce_qp *hr_qp)
1063 {
1064 	if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
1065 	    hr_qp->ibqp.qp_type == IB_QPT_GSI)
1066 		hr_qp->cong_type = CONG_TYPE_DCQCN;
1067 	else
1068 		hr_qp->cong_type = hr_dev->caps.default_cong_type;
1069 }
1070 
1071 static int set_congest_type(struct hns_roce_qp *hr_qp,
1072 			    struct hns_roce_ib_create_qp *ucmd)
1073 {
1074 	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
1075 
1076 	switch (ucmd->cong_type_flags) {
1077 	case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
1078 		hr_qp->cong_type = CONG_TYPE_DCQCN;
1079 		break;
1080 	case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
1081 		hr_qp->cong_type = CONG_TYPE_LDCP;
1082 		break;
1083 	case HNS_ROCE_CREATE_QP_FLAGS_HC3:
1084 		hr_qp->cong_type = CONG_TYPE_HC3;
1085 		break;
1086 	case HNS_ROCE_CREATE_QP_FLAGS_DIP:
1087 		hr_qp->cong_type = CONG_TYPE_DIP;
1088 		break;
1089 	default:
1090 		return -EINVAL;
1091 	}
1092 
1093 	if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
1094 		return -EOPNOTSUPP;
1095 
1096 	if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
1097 	    hr_qp->cong_type != CONG_TYPE_DCQCN)
1098 		return -EOPNOTSUPP;
1099 
1100 	return 0;
1101 }
1102 
1103 static int set_congest_param(struct hns_roce_dev *hr_dev,
1104 			     struct hns_roce_qp *hr_qp,
1105 			     struct hns_roce_ib_create_qp *ucmd)
1106 {
1107 	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
1108 		return set_congest_type(hr_qp, ucmd);
1109 
1110 	default_congest_type(hr_dev, hr_qp);
1111 
1112 	return 0;
1113 }
1114 
/*
 * Derive the QP's size/config parameters from the create attributes.
 *
 * Sets the signalling mode and RQ size, then the SQ size and congestion
 * parameters — from the user's copied-in ucmd for userspace QPs, or from
 * driver defaults for kernel QPs. Returns 0 or a negative errno.
 */
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ucontext *uctx;
	int ret;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr), !!udata);
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		/* Copy at most sizeof(*ucmd) to tolerate older/newer ABIs. */
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		/* Inherit the per-context feature config negotiated at
		 * ucontext allocation time.
		 */
		uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
						 ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
			return ret;
		}

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
	} else {
		/* Kernel QPs: extended SGE is enabled from HIP09 onwards. */
		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		default_congest_type(hr_dev, hr_qp);
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}
1171 
/*
 * Common QP creation path shared by kernel and user QPs.
 *
 * Allocates, in order: parameters, kernel wrid arrays (kernel QPs only),
 * WQE buffer, QPN, doorbells, QP context, and the QPN->QP xarray entry;
 * then publishes the response to userspace and initializes flow control.
 * On failure each step is unwound in reverse order via the goto chain.
 * Returns 0 or a negative errno.
 */
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);
	spin_lock_init(&hr_qp->flush_lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;
	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);

	/* This driver supports no IB_QP_CREATE_* flags. */
	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		goto err_out;
	}

	/* Userspace tracks its own wrids; only kernel QPs need arrays. */
	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			goto err_out;
		}
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpn(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_qpn;
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_db;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpc;
	}

	/* Make the QP findable by QPN (async event / CQE handling). */
	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_store;
	}

	if (udata) {
		/* Tell userspace which optional capabilities were enabled. */
		resp.cap_flags = hr_qp->en_flags;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_flow_ctrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_flow_ctrl;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	/* Dropped by hns_roce_qp_destroy(); completion gates final free. */
	refcount_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_flow_ctrl:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_store:
	free_qpc(hr_dev, hr_qp);
err_qpc:
	free_qp_db(hr_dev, hr_qp, udata);
err_db:
	free_qpn(hr_dev, hr_qp);
err_qpn:
	free_qp_buf(hr_dev, hr_qp);
err_buf:
	free_kernel_wrid(hr_qp);
err_out:
	mutex_destroy(&hr_qp->mutex);
	return ret;
}
1282 
/*
 * Tear down a QP created by hns_roce_create_qp_common().
 *
 * Drops the initial reference and waits for all other holders (event and
 * CQE handlers) to release theirs before freeing any resources.
 */
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (refcount_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);
	mutex_destroy(&hr_qp->mutex);
}
1297 
1298 static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
1299 			 bool is_user)
1300 {
1301 	switch (type) {
1302 	case IB_QPT_XRC_INI:
1303 	case IB_QPT_XRC_TGT:
1304 		if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
1305 			goto out;
1306 		break;
1307 	case IB_QPT_UD:
1308 		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
1309 		    is_user)
1310 			goto out;
1311 		break;
1312 	case IB_QPT_RC:
1313 	case IB_QPT_GSI:
1314 		break;
1315 	default:
1316 		goto out;
1317 	}
1318 
1319 	return 0;
1320 
1321 out:
1322 	ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);
1323 
1324 	return -EOPNOTSUPP;
1325 }
1326 
1327 int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
1328 		       struct ib_udata *udata)
1329 {
1330 	struct ib_device *ibdev = qp->device;
1331 	struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
1332 	struct hns_roce_qp *hr_qp = to_hr_qp(qp);
1333 	int ret;
1334 
1335 	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
1336 	if (ret)
1337 		goto err_out;
1338 
1339 	if (init_attr->qp_type == IB_QPT_XRC_TGT)
1340 		hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
1341 
1342 	if (init_attr->qp_type == IB_QPT_GSI) {
1343 		hr_qp->port = init_attr->port_num - 1;
1344 		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
1345 	}
1346 
1347 	ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
1348 	if (ret)
1349 		ibdev_err(ibdev, "create QP type %d failed(%d)\n",
1350 			  init_attr->qp_type, ret);
1351 
1352 err_out:
1353 	if (ret)
1354 		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_CREATE_ERR_CNT]);
1355 
1356 	return ret;
1357 }
1358 
1359 int to_hr_qp_type(int qp_type)
1360 {
1361 	switch (qp_type) {
1362 	case IB_QPT_RC:
1363 		return SERV_TYPE_RC;
1364 	case IB_QPT_UD:
1365 	case IB_QPT_GSI:
1366 		return SERV_TYPE_UD;
1367 	case IB_QPT_XRC_INI:
1368 	case IB_QPT_XRC_TGT:
1369 		return SERV_TYPE_XRC;
1370 	default:
1371 		return -1;
1372 	}
1373 }
1374 
1375 static int check_mtu_validate(struct hns_roce_dev *hr_dev,
1376 			      struct hns_roce_qp *hr_qp,
1377 			      struct ib_qp_attr *attr, int attr_mask)
1378 {
1379 	struct net_device *net_dev;
1380 	enum ib_mtu active_mtu;
1381 	int p;
1382 
1383 	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1384 	net_dev = get_hr_netdev(hr_dev, p);
1385 	active_mtu = iboe_get_mtu(net_dev->mtu);
1386 
1387 	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
1388 	    attr->path_mtu > hr_dev->caps.max_mtu) ||
1389 	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
1390 		ibdev_err(&hr_dev->ib_dev,
1391 			"attr path_mtu(%d)invalid while modify qp",
1392 			attr->path_mtu);
1393 		return -EINVAL;
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1400 				  int attr_mask)
1401 {
1402 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1403 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1404 	int p;
1405 
1406 	if ((attr_mask & IB_QP_PORT) &&
1407 	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
1408 		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
1409 			  attr->port_num);
1410 		return -EINVAL;
1411 	}
1412 
1413 	if (attr_mask & IB_QP_PKEY_INDEX) {
1414 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1415 		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
1416 			ibdev_err(&hr_dev->ib_dev,
1417 				  "invalid attr, pkey_index = %u.\n",
1418 				  attr->pkey_index);
1419 			return -EINVAL;
1420 		}
1421 	}
1422 
1423 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1424 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
1425 		ibdev_err(&hr_dev->ib_dev,
1426 			  "invalid attr, max_rd_atomic = %u.\n",
1427 			  attr->max_rd_atomic);
1428 		return -EINVAL;
1429 	}
1430 
1431 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1432 	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
1433 		ibdev_err(&hr_dev->ib_dev,
1434 			  "invalid attr, max_dest_rd_atomic = %u.\n",
1435 			  attr->max_dest_rd_atomic);
1436 		return -EINVAL;
1437 	}
1438 
1439 	if (attr_mask & IB_QP_PATH_MTU)
1440 		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
1441 
1442 	return 0;
1443 }
1444 
/*
 * Modify-QP verb: validates the requested state transition and attributes,
 * then delegates the hardware transition to hw->modify_qp. Failures bump
 * the modify-error debugfs counter.
 */
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ib_modify_qp_resp resp = {};
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	/* The caller's view of the current state must match ours. */
	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	/*
	 * When a userspace QP enters ERR, latch the SQ/RQ producer indices
	 * from the record doorbells so flush CQEs cover all posted WRs.
	 * Without SQ record doorbell support the indices are unknown.
	 */
	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				  "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	/* RESET->RESET needs no hardware interaction. */
	if (cur_state == new_state && cur_state == IB_QPS_RESET)
		goto out;

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state, udata);
	if (ret)
		goto out;

	/* Report the negotiated TC mode/priority back to userspace. */
	if (udata && udata->outlen) {
		resp.tc_mode = hr_qp->tc_mode;
		resp.priority = hr_qp->sl;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			ibdev_err_ratelimited(&hr_dev->ib_dev,
					      "failed to copy modify qp resp.\n");
	}

out:
	mutex_unlock(&hr_qp->mutex);
	if (ret)
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_QP_MODIFY_ERR_CNT]);

	return ret;
}
1511 
/*
 * Lock a QP's send and receive CQs together.
 *
 * Either CQ may be NULL, and both may be the same CQ. When both exist and
 * differ, they are locked in ascending cqn order so that concurrent callers
 * cannot deadlock (ABBA). The __acquire() annotations keep sparse's lock
 * balance checking happy for the branches that take fewer real locks.
 */
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
1535 
/*
 * Unlock the CQ pair taken by hns_roce_lock_cqs(), releasing in the exact
 * reverse order of acquisition; the __release() annotations balance the
 * sparse lock tracking for branches where only one (or no) real lock is
 * held.
 */
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}
1560 
/* Return the kernel VA at @offset within the QP's WQE buffer. */
static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
1565 
/* Return the address of RQ WQE number @n. */
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
1570 
/* Return the address of SQ WQE number @n. */
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
1575 
/* Return the address of extended SGE number @n. */
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
1580 
1581 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
1582 			  struct ib_cq *ib_cq)
1583 {
1584 	struct hns_roce_cq *hr_cq;
1585 	u32 cur;
1586 
1587 	cur = hr_wq->head - hr_wq->tail;
1588 	if (likely(cur + nreq < hr_wq->wqe_cnt))
1589 		return false;
1590 
1591 	hr_cq = to_hr_cq(ib_cq);
1592 	spin_lock(&hr_cq->lock);
1593 	cur = hr_wq->head - hr_wq->tail;
1594 	spin_unlock(&hr_cq->lock);
1595 
1596 	return cur + nreq >= hr_wq->wqe_cnt;
1597 }
1598 
1599 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
1600 {
1601 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
1602 	unsigned int reserved_from_bot;
1603 	unsigned int i;
1604 
1605 	mutex_init(&qp_table->scc_mutex);
1606 	mutex_init(&qp_table->bank_mutex);
1607 	xa_init(&hr_dev->qp_table_xa);
1608 	xa_init(&qp_table->dip_xa);
1609 
1610 	reserved_from_bot = hr_dev->caps.reserved_qps;
1611 
1612 	for (i = 0; i < reserved_from_bot; i++) {
1613 		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
1614 		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
1615 	}
1616 
1617 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
1618 		ida_init(&hr_dev->qp_table.bank[i].ida);
1619 		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
1620 					       HNS_ROCE_QP_BANK_NUM - 1;
1621 		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
1628 {
1629 	int i;
1630 
1631 	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
1632 		ida_destroy(&hr_dev->qp_table.bank[i].ida);
1633 	xa_destroy(&hr_dev->qp_table.dip_xa);
1634 	xa_destroy(&hr_dev->qp_table_xa);
1635 	mutex_destroy(&hr_dev->qp_table.bank_mutex);
1636 	mutex_destroy(&hr_dev->qp_table.scc_mutex);
1637 }
1638