/*
 * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <config.h>

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <inttypes.h>
#include <assert.h>

#include "libcxgb4.h"
#include "cxgb4-abi.h"

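/*
 * MASKED() rounds a mapped doorbell/GTS address down to its page
 * boundary so the region can be handed back to munmap(); the low bits
 * of the address select a register offset within the mapped page.
 */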
#define MASKED(x) (void *)((unsigned long)(x) & c4iw_page_mask)

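/*
 * Query device attributes through the uverbs command channel and decode
 * the raw firmware version into its dotted major.minor.sub.build form,
 * one byte per component, most significant byte first.
 */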
int c4iw_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
{
	struct ibv_query_device cmd;
	uint64_t raw_fw_ver;
	u8 major, minor, sub_minor, build;
	int ret;

	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
				   sizeof cmd);
	if (ret)
		return ret;

	major = (raw_fw_ver >> 24) & 0xff;
	minor = (raw_fw_ver >> 16) & 0xff;
	sub_minor = (raw_fw_ver >> 8) & 0xff;
	build = raw_fw_ver & 0xff;

	snprintf(attr->fw_ver, sizeof attr->fw_ver,
		 "%d.%d.%d.%d", major, minor, sub_minor, build);

	return 0;
}

int c4iw_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr)
{
	struct ibv_query_port cmd;

	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
}

struct ibv_pd *c4iw_alloc_pd(struct ibv_context *context)
{
	struct ibv_alloc_pd cmd;
	struct c4iw_alloc_pd_resp resp;
	struct c4iw_pd *pd;

	pd = malloc(sizeof *pd);
	if (!pd)
		return NULL;

	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
			     &resp.ibv_resp, sizeof resp)) {
		free(pd);
		return NULL;
	}

	return &pd->ibv_pd;
}

int c4iw_free_pd(struct ibv_pd *pd)
{
	int ret;

	ret = ibv_cmd_dealloc_pd(pd);
	if (ret)
		return ret;

	free(pd);
	return 0;
}

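/*
 * Common MR registration path: register the region with the kernel,
 * record the VA/FBO and length locally, and index the MR by its stag in
 * the device's mmid2ptr table so it can be found again by id.
 */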
static struct ibv_mr *__c4iw_reg_mr(struct ibv_pd *pd, void *addr,
				    size_t length, uint64_t hca_va,
				    int access)
{
	struct c4iw_mr *mhp;
	struct ibv_reg_mr cmd;
	struct ibv_reg_mr_resp resp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

	mhp = malloc(sizeof *mhp);
	if (!mhp)
		return NULL;

	if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
			   access, &mhp->ibv_mr, &cmd, sizeof cmd,
			   &resp, sizeof resp)) {
		free(mhp);
		return NULL;
	}

	mhp->va_fbo = hca_va;
	mhp->len = length;

	PDBG("%s stag 0x%x va_fbo 0x%" PRIx64 " len %d\n",
	     __func__, mhp->ibv_mr.rkey, mhp->va_fbo, mhp->len);

	pthread_spin_lock(&dev->lock);
	dev->mmid2ptr[c4iw_mmid(mhp->ibv_mr.lkey)] = mhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(mr);
	return &mhp->ibv_mr;
}

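/*
 * Applications reach this entry point through the generic verbs API.  A
 * minimal sketch (error handling elided; the buffer and PD names are
 * illustrative, not part of this file):
 *
 *	void *buf;
 *	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), 4096))
 *		...;
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, 4096,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *	...
 *	ibv_dereg_mr(mr);
 */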
struct ibv_mr *c4iw_reg_mr(struct ibv_pd *pd, void *addr,
			   size_t length, int access)
{
	PDBG("%s addr %p length %zu\n", __func__, addr, length);
	return __c4iw_reg_mr(pd, addr, length, (uintptr_t) addr, access);
}
149 
150 int c4iw_dereg_mr(struct ibv_mr *mr)
151 {
152 	int ret;
153 	struct c4iw_dev *dev = to_c4iw_dev(mr->pd->context->device);
154 
155 	ret = ibv_cmd_dereg_mr(mr);
156 	if (ret)
157 		return ret;
158 
159 	pthread_spin_lock(&dev->lock);
160 	dev->mmid2ptr[c4iw_mmid(mr->lkey)] = NULL;
161 	pthread_spin_unlock(&dev->lock);
162 
163 	free(to_c4iw_mr(mr));
164 
165 	return 0;
166 }
167 
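/*
 * Create a CQ: the kernel allocates the hardware queue and returns mmap
 * keys for the queue memory and the GTS (doorbell) page; both are mapped
 * into the process, and a software shadow queue is allocated beside them.
 */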
struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel, int comp_vector)
{
	struct ibv_create_cq cmd;
	struct c4iw_create_cq_resp resp;
	struct c4iw_cq *chp;
	struct c4iw_dev *dev = to_c4iw_dev(context->device);
	int ret;

	chp = calloc(1, sizeof *chp);
	if (!chp) {
		return NULL;
	}

	resp.reserved = 0;
	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
				&chp->ibv_cq, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err1;

	if (resp.reserved)
		PDBG("%s c4iw_create_cq_resp reserved field modified by kernel\n",
		     __func__);

	ret = pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
	if (ret)
		goto err2;
#ifdef STALL_DETECTION
	gettimeofday(&chp->time, NULL);
#endif
	chp->rhp = dev;
	chp->cq.qid_mask = resp.qid_mask;
	chp->cq.cqid = resp.cqid;
	chp->cq.size = resp.size;
	chp->cq.memsize = resp.memsize;
	chp->cq.gen = 1;
	chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE,
			     MAP_SHARED, context->cmd_fd, resp.key);
	if (chp->cq.queue == MAP_FAILED)
		goto err3;

	chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
			   context->cmd_fd, resp.gts_key);
	if (chp->cq.ugts == MAP_FAILED)
		goto err4;

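	/*
	 * ugts is a pointer to u32 within the mapped doorbell page; the
	 * increment below selects the GTS/CIDX_INC register offset used
	 * when ringing the CQ, which differs between T4 and T5-and-later
	 * parts (an assumption based on the chip's user doorbell layout).
	 */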
	if (dev_is_t4(chp->rhp))
		chp->cq.ugts += 1;
	else
		chp->cq.ugts += 5;
	chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue);
	if (!chp->cq.sw_queue)
		goto err5;

	PDBG("%s cqid 0x%x key %" PRIx64 " va %p memsize %lu gts_key %"
	       PRIx64 " va %p qid_mask 0x%x\n",
	       __func__, chp->cq.cqid, resp.key, chp->cq.queue,
	       chp->cq.memsize, resp.gts_key, chp->cq.ugts, chp->cq.qid_mask);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = chp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(cq);
	return &chp->ibv_cq;
err5:
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
err4:
	munmap(chp->cq.queue, chp->cq.memsize);
err3:
	pthread_spin_destroy(&chp->lock);
err2:
	(void)ibv_cmd_destroy_cq(&chp->ibv_cq);
err1:
	free(chp);
	return NULL;
}

int c4iw_resize_cq(struct ibv_cq *ibcq, int cqe)
{
#if 0
	int ret;

	struct ibv_resize_cq cmd;
	struct ibv_resize_cq_resp resp;
	ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd, &resp, sizeof resp);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
#else
	return ENOSYS;
#endif
}

int c4iw_destroy_cq(struct ibv_cq *ibcq)
{
	int ret;
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_dev *dev = to_c4iw_dev(ibcq->context->device);

	chp->cq.error = 1;
	ret = ibv_cmd_destroy_cq(ibcq);
	if (ret) {
		return ret;
	}
	verbs_cleanup_cq(ibcq);
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
	munmap(chp->cq.queue, chp->cq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(chp->cq.sw_queue);
	pthread_spin_destroy(&chp->lock);
	free(chp);
	return 0;
}

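/*
 * SRQs are not implemented by this provider; the entry points below
 * exist only to satisfy the verbs interface and simply fail.
 */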
struct ibv_srq *c4iw_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr)
{
	return NULL;
}

int c4iw_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
		    int attr_mask)
{
	return ENOSYS;
}

int c4iw_destroy_srq(struct ibv_srq *srq)
{
	return ENOSYS;
}

int c4iw_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr)
{
	return ENOSYS;
}

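/*
 * QP creation against ABI version 0 kernels.  Only the plain T4 doorbell
 * layout is handled here: no BAR2/write-combining setup, no on-chip SQ,
 * and no shared status page.
 */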
static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
				   struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp_v0 resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = 0;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
	if (ret)
		goto err3;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err4;

	qhp->wq.sq.udb = dbva;
	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err5;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err6;
	qhp->wq.rq.udb = dbva;
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err7;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err8;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err9;

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err9:
	free(qhp->wq.sq.sw_sq);
err8:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err7:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err6:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err5:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err4:
	pthread_spin_destroy(&qhp->lock);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

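/*
 * QP creation against the current ABI.  Beyond the v0 setup, this path
 * handles T5+ BAR2 user doorbells, on-chip SQ memory (T4_SQ_ONCHIP), and
 * the shared status page the kernel uses to signal doorbell-off mode.
 */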
static struct ibv_qp *create_qp(struct ibv_pd *pd,
				struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	struct c4iw_context *ctx = to_c4iw_context(pd->context);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0;
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	if (ma_wr && resp.sq_memsize < (resp.sq_size + 1) *
	    sizeof *qhp->wq.sq.queue + 16 * sizeof(__be64)) {
		ma_wr = 0;
		fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. "
			"MA workaround disabled.\n");
	}
	ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
	if (ret)
		goto err3;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err4;
	qhp->wq.sq.udb = dbva;
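	/*
	 * On T5 and later, each qid owns a 128-byte user doorbell segment.
	 * If this qid's segment falls inside the mapped page, point udb at
	 * it (dividing by 4 converts the byte offset to u32 units) and
	 * note that the write-combining register is usable; otherwise the
	 * qid must be passed explicitly via bar2_qid.  The final += 2
	 * selects the doorbell register within the segment (an assumption
	 * based on the SGE user doorbell layout).
	 */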
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.sq.qid &
						      qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.sq.udb += segment_offset / 4;
			qhp->wq.sq.wc_reg_available = 1;
		} else
			qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask;
		qhp->wq.sq.udb += 2;
	}

	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err5;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err6;
	qhp->wq.rq.udb = dbva;
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.rq.qid &
						      qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.rq.udb += segment_offset / 4;
			qhp->wq.rq.wc_reg_available = 1;
		} else
			qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask;
		qhp->wq.rq.udb += 2;
	}
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err7;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err8;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err9;

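	/*
	 * For on-chip SQs, map the page holding the PCIE_MA_SYNC register
	 * and point ma_sync at the register's offset within it; it is used
	 * by the MA write-ordering workaround when flushing WC writes.
	 */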
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
					  MAP_SHARED, pd->context->cmd_fd,
					  resp.ma_sync_key);
		if (qhp->wq.sq.ma_sync == MAP_FAILED)
			goto err10;
		qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
	}

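	/*
	 * The doorbell-off flag lives in the kernel's shared status page
	 * when one is mapped; older kernels expose it in the status area
	 * at the tail of the RQ instead.
	 */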
	if (ctx->status_page_size) {
		qhp->wq.db_offp = &ctx->status_page->db_off;
	} else {
		qhp->wq.db_offp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off;
	}

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err10:
	free(qhp->wq.rq.sw_rq);
err9:
	free(qhp->wq.sq.sw_sq);
err8:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err7:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err6:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err5:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err4:
	pthread_spin_destroy(&qhp->lock);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

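/*
 * Dispatch QP creation on the device's ABI version.  Applications get
 * here through the generic verbs API; a minimal sketch (names are
 * illustrative, error handling elided):
 *
 *	struct ibv_qp_init_attr attr = {
 *		.send_cq = cq, .recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 1, .max_recv_sge = 1 },
 *		.qp_type = IBV_QPT_RC,
 *	};
 *	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
 */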
struct ibv_qp *c4iw_create_qp(struct ibv_pd *pd,
			      struct ibv_qp_init_attr *attr)
{
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

	if (dev->abi_version == 0)
		return create_qp_v0(pd, attr);
	return create_qp(pd, attr);
}

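/*
 * Return a QP's software queue state to its initial values and zero the
 * hardware queue memory; called after a successful transition to RESET.
 */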
static void reset_qp(struct c4iw_qp *qhp)
{
	PDBG("%s enter qp %p\n", __func__, qhp);
	qhp->wq.sq.cidx = 0;
	qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0;
	qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
	qhp->wq.sq.oldest_read = NULL;
	memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
	if (t4_sq_onchip(&qhp->wq))
		mmio_flush_writes();
	memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
}

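/*
 * Modify QP state under the QP lock: flush first if the WQ has already
 * entered an error state, and reset the local queue state when a
 * transition to RESET succeeds.
 */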
int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		   int attr_mask)
{
	struct ibv_modify_qp cmd = {};
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	PDBG("%s enter qp %p new state %d\n", __func__, ibqp,
	     attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
	if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
		reset_qp(qhp);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

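/*
 * Destroy a QP: flush any pending work, tear down the kernel object,
 * then unmap the doorbell and queue memory and drop the qid lookup
 * entry.
 */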
int c4iw_destroy_qp(struct ibv_qp *ibqp)
{
	int ret;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);

	PDBG("%s enter qp %p\n", __func__, ibqp);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_qp(qhp);
	pthread_spin_unlock(&qhp->lock);

	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		return ret;
	}
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
		munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size);
	}
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
	munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize);
	munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(qhp->wq.rq.sw_rq);
	free(qhp->wq.sq.sw_sq);
	pthread_spin_destroy(&qhp->lock);
	free(qhp);
	return 0;
}

int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		  int attr_mask, struct ibv_qp_init_attr *init_attr)
{
	struct ibv_query_qp cmd;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

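/*
 * Address handles are a UD concept; an iWARP device has no use for them,
 * so the AH entry points simply fail.
 */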
struct ibv_ah *c4iw_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	return NULL;
}

int c4iw_destroy_ah(struct ibv_ah *ah)
{
	return ENOSYS;
}

int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

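/*
 * Provider-level handling for async events: fatal QP errors flush the
 * QP so outstanding work requests complete with an error status.
 */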
void c4iw_async_event(struct ibv_async_event *event)
{
	PDBG("%s type %d obj %p\n", __func__, event->event_type,
	     event->element.cq);

	switch (event->event_type) {
	case IBV_EVENT_CQ_ERR:
		break;
	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_PATH_MIG_ERR: {
		struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
		pthread_spin_lock(&qhp->lock);
		c4iw_flush_qp(qhp);
		pthread_spin_unlock(&qhp->lock);
		break;
	}
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
	default:
		break;
	}
}