/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

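/*
 * CQNs are spread across HNS_ROCE_CQ_BANK_NUM banks. Pick the bank
 * currently holding the fewest CQs so allocations stay balanced.
 */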
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

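/*
 * Allocate a CQN: take an id from the least loaded bank's IDA and encode
 * the bank id in the low CQ_BANKID_SHIFT bits, so that free_cqn() can
 * recover the bank from the CQN alone.
 */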
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower 2 bits of the CQN are the bankid */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of CQN are used to hash to different banks */
	return (u8)(cqn & GENMASK(1, 0));
}

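/* Return the id to its bank's IDA and drop the bank's load count. */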
static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}

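/*
 * Post a CREATE_CQC mailbox command: fill the CQ context into a mailbox
 * buffer and ask the hardware to install the context for this CQN.
 */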
static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq,
			       u64 *mtts, dma_addr_t dma_handle)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
		return PTR_ERR(mailbox);
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
				     hr_cq->cqn);
	if (ret)
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

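/*
 * Bring the CQ context to life: look up the buffer addresses in the MTR,
 * reserve a CQC entry in the HEM table, publish the CQ in the xarray for
 * the event handlers, then ask the hardware to create the context.
 */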
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 mtts[MTT_MIN_COUNT] = {};
	int ret;

	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	if (ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return ret;
	}

	/* Get a CQC entry from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		return ret;
	}

	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
				  hns_roce_get_mtr_ba(&hr_cq->mtr));
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase_irq(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}

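/*
 * Tear down the CQ context in reverse order of alloc_cqc(): destroy the
 * hardware context, unpublish the CQ, wait for in-flight event handling
 * to finish, then release the CQC HEM entry.
 */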
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase_irq(&cq_table->array, hr_cq->cqn);

	/* Wait for any interrupt handling in progress on this CQ's EQ */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Wait until all outstanding references to the CQ have been dropped */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

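/*
 * Allocate the CQE buffer through the MTR layer: a single region sized
 * cq_depth * cqe_size, mapped with the device's configured hop number
 * and page shifts.
 */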
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

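/*
 * Set up the CQ doorbell. For userspace CQs with record doorbell support,
 * map the user-provided doorbell page and report the capability back in
 * the response; for kernel CQs, allocate a record doorbell and point
 * db_reg at the device's doorbell register.
 */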
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

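/* Reject CQE counts or completion vectors outside the device's caps. */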
static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
				 const struct ib_cq_init_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  attr->cqe, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
			  attr->comp_vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	return 0;
}

static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
		       struct hns_roce_ib_create_cq *ucmd)
{
	struct ib_device *ibdev = hr_cq->ib_cq.device;
	int ret;

	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
	if (ret) {
		ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

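/*
 * Size the CQ: clamp the requested depth to the device minimum and round
 * it up to a power of two. ib_cq.cqe holds the largest valid CQE index,
 * hence cq_entries - 1.
 */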
static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;

	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);
}

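/*
 * Kernel CQs use the device's default CQE size. Userspace may request
 * HNS_ROCE_V2_CQE_SIZE or HNS_ROCE_V3_CQE_SIZE; older ABIs that do not
 * pass cqe_size fall back to the V2 size.
 */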
static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	if (!udata) {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
		return 0;
	}

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid cqe size %u.\n", ucmd->cqe_size);
			return -EINVAL;
		}

		hr_cq->cqe_size = ucmd->cqe_size;
	} else {
		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	}

	return 0;
}

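/*
 * Verb entry point: validate the attributes, size the CQ, then allocate
 * the CQE buffer, doorbell, CQN and hardware context in order, unwinding
 * in reverse on failure.
 */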
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct uverbs_attr_bundle *attrs)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct ib_udata *udata = &attrs->driver_udata;
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int ret;

	if (attr->flags) {
		ret = -EOPNOTSUPP;
		goto err_out;
	}

	ret = verify_cq_create_attr(hr_dev, attr);
	if (ret)
		goto err_out;

	if (udata) {
		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
		if (ret)
			goto err_out;
	}

	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

	ret = set_cqe_size(hr_cq, udata, &ucmd);
	if (ret)
		goto err_out;

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		goto err_out;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	refcount_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
err_out:
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_CREATE_ERR_CNT]);

	return ret;
}

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

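/*
 * Completion event handler, invoked from the EQ handling path: bump the
 * arm sequence number and call the consumer's completion callback.
 */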
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

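/*
 * Asynchronous (error) event handler. Take a reference under the xarray
 * lock so the CQ cannot be freed while its event handler runs; free_cqc()
 * waits on hr_cq->free until this reference is dropped.
 */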
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	xa_lock(&hr_dev->cq_table.array);
	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (hr_cq)
		refcount_inc(&hr_cq->refcount);
	xa_unlock(&hr_dev->cq_table.array);
	if (!hr_cq) {
		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

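/*
 * Initialize the CQ table. Reserved CQNs land in the banks selected by
 * their low bits, so raise each affected bank's minimum id and load count
 * before initializing the per-bank IDAs.
 */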
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
	mutex_destroy(&hr_dev->cq_table.bank_mutex);
}