/*
 * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <config.h>

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <inttypes.h>
#include <assert.h>

#include "libcxgb4.h"
#include "cxgb4-abi.h"

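/*
 * Round a mapped doorbell/GTS pointer back down to its page boundary.
 * The queue setup code below offsets these pointers after mmap(), so
 * munmap() must be given the original page-aligned address.
 */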
#define MASKED(x) (void *)((unsigned long)(x) & c4iw_page_mask)

int c4iw_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
{
	struct ibv_query_device cmd;
	uint64_t raw_fw_ver;
	u8 major, minor, sub_minor, build;
	int ret;

	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd,
				   sizeof cmd);
	if (ret)
		return ret;

	major = (raw_fw_ver >> 24) & 0xff;
	minor = (raw_fw_ver >> 16) & 0xff;
	sub_minor = (raw_fw_ver >> 8) & 0xff;
	build = raw_fw_ver & 0xff;

	snprintf(attr->fw_ver, sizeof attr->fw_ver,
		 "%d.%d.%d.%d", major, minor, sub_minor, build);

	return 0;
}
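
/*
 * Illustrative sketch, not part of the provider: an application reaches
 * the routine above through the generic verbs API and sees only the
 * formatted "major.minor.sub_minor.build" string, each field taken from
 * one byte of raw_fw_ver (most significant byte first).  The helper
 * name below is hypothetical.
 */
#if 0
#include <stdio.h>
#include <infiniband/verbs.h>

static int print_fw_version(struct ibv_context *ctx)
{
	struct ibv_device_attr dattr;
	int ret = ibv_query_device(ctx, &dattr);	/* dispatches to c4iw_query_device() */

	if (ret)
		return ret;
	printf("firmware %s\n", dattr.fw_ver);		/* e.g. "1.16.63.0" */
	return 0;
}
#endif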

int c4iw_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr)
{
	struct ibv_query_port cmd;

	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
}

struct ibv_pd *c4iw_alloc_pd(struct ibv_context *context)
{
	struct ibv_alloc_pd cmd;
	struct c4iw_alloc_pd_resp resp;
	struct c4iw_pd *pd;

	pd = malloc(sizeof *pd);
	if (!pd)
		return NULL;

	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
			     &resp.ibv_resp, sizeof resp)) {
		free(pd);
		return NULL;
	}

	return &pd->ibv_pd;
}

int c4iw_free_pd(struct ibv_pd *pd)
{
	int ret;

	ret = ibv_cmd_dealloc_pd(pd);
	if (ret)
		return ret;

	free(pd);
	return 0;
}
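
/*
 * Illustrative sketch, not part of the provider: protection domains are
 * obtained and released through the generic entry points, which
 * dispatch to c4iw_alloc_pd()/c4iw_free_pd() above.  The helper name is
 * hypothetical.
 */
#if 0
#include <infiniband/verbs.h>

static int pd_roundtrip(struct ibv_context *ctx)
{
	struct ibv_pd *pd = ibv_alloc_pd(ctx);	/* -> c4iw_alloc_pd() */

	if (!pd)
		return -1;
	return ibv_dealloc_pd(pd);		/* -> c4iw_free_pd() */
}
#endif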

static struct ibv_mr *__c4iw_reg_mr(struct ibv_pd *pd, void *addr,
				    size_t length, uint64_t hca_va,
				    int access)
{
	struct c4iw_mr *mhp;
	struct ibv_reg_mr cmd;
	struct ibv_reg_mr_resp resp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

	mhp = malloc(sizeof *mhp);
	if (!mhp)
		return NULL;

	if (ibv_cmd_reg_mr(pd, addr, length, hca_va,
			   access, &mhp->ibv_mr, &cmd, sizeof cmd,
			   &resp, sizeof resp)) {
		free(mhp);
		return NULL;
	}

	mhp->va_fbo = hca_va;
	mhp->len = length;

	PDBG("%s stag 0x%x va_fbo 0x%" PRIx64 " len %d\n",
	     __func__, mhp->ibv_mr.rkey, mhp->va_fbo, mhp->len);

	pthread_spin_lock(&dev->lock);
	dev->mmid2ptr[c4iw_mmid(mhp->ibv_mr.lkey)] = mhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(mr);
	return &mhp->ibv_mr;
}

struct ibv_mr *c4iw_reg_mr(struct ibv_pd *pd, void *addr,
			   size_t length, int access)
{
	PDBG("%s addr %p length %ld\n", __func__, addr, length);
	return __c4iw_reg_mr(pd, addr, length, (uintptr_t) addr, access);
}
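
/*
 * Illustrative sketch, not part of the provider: a typical caller
 * registers a buffer with ibv_reg_mr(), which reaches c4iw_reg_mr()
 * above with hca_va equal to the buffer's virtual address.  The helper
 * name and access flags are hypothetical.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>
#include <infiniband/verbs.h>

static struct ibv_mr *reg_example(struct ibv_pd *pd, size_t len)
{
	void *buf;

	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len))
		return NULL;
	/* The lkey/rkey in the returned MR carry the stag allocated by
	 * the kernel driver; see the mmid2ptr bookkeeping above. */
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
}
#endif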

int c4iw_dereg_mr(struct ibv_mr *mr)
{
	int ret;
	struct c4iw_dev *dev = to_c4iw_dev(mr->pd->context->device);

	ret = ibv_cmd_dereg_mr(mr);
	if (ret)
		return ret;

	pthread_spin_lock(&dev->lock);
	dev->mmid2ptr[c4iw_mmid(mr->lkey)] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(to_c4iw_mr(mr));

	return 0;
}

struct ibv_cq *c4iw_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel, int comp_vector)
{
	struct ibv_create_cq cmd;
	struct c4iw_create_cq_resp resp;
	struct c4iw_cq *chp;
	struct c4iw_dev *dev = to_c4iw_dev(context->device);
	int ret;

	chp = calloc(1, sizeof *chp);
	if (!chp) {
		return NULL;
	}

	resp.reserved = 0;
	ret = ibv_cmd_create_cq(context, cqe, channel, comp_vector,
				&chp->ibv_cq, &cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err1;

	if (resp.reserved)
		PDBG("%s c4iw_create_cq_resp reserved field modified by kernel\n",
		     __func__);

	pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE);
#ifdef STALL_DETECTION
	gettimeofday(&chp->time, NULL);
#endif
	chp->rhp = dev;
	chp->cq.qid_mask = resp.qid_mask;
	chp->cq.cqid = resp.cqid;
	chp->cq.size = resp.size;
	chp->cq.memsize = resp.memsize;
	chp->cq.gen = 1;
	chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE,
			     MAP_SHARED, context->cmd_fd, resp.key);
	if (chp->cq.queue == MAP_FAILED)
		goto err2;

	chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
			   context->cmd_fd, resp.gts_key);
	if (chp->cq.ugts == MAP_FAILED)
		goto err3;

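	/*
	 * Point at the CQ's GTS doorbell within the mapped page.  The
	 * register offset differs between T4 user mappings and the BAR2
	 * page used on T5 and later; the arithmetic below is in units of
	 * the pointed-to type.
	 */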
	if (dev_is_t4(chp->rhp))
		chp->cq.ugts += 1;
	else
		chp->cq.ugts += 5;
	chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue);
	if (!chp->cq.sw_queue)
		goto err4;

	PDBG("%s cqid 0x%x key %" PRIx64 " va %p memsize %lu gts_key %"
	       PRIx64 " va %p qid_mask 0x%x\n",
	       __func__, chp->cq.cqid, resp.key, chp->cq.queue,
	       chp->cq.memsize, resp.gts_key, chp->cq.ugts, chp->cq.qid_mask);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = chp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(cq);
	return &chp->ibv_cq;
err4:
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
err3:
	munmap(chp->cq.queue, chp->cq.memsize);
err2:
	(void)ibv_cmd_destroy_cq(&chp->ibv_cq);
err1:
	free(chp);
	return NULL;
}
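
/*
 * Illustrative sketch, not part of the provider: CQ creation goes
 * through the generic API; "cqe" is a minimum, and the kernel may round
 * the ring up (the actual depth comes back in resp.size above).  The
 * helper name and depth are hypothetical.
 */
#if 0
#include <infiniband/verbs.h>

static struct ibv_cq *cq_example(struct ibv_context *ctx)
{
	/* No cq_context, no completion channel, completion vector 0. */
	return ibv_create_cq(ctx, 128, NULL, NULL, 0);	/* -> c4iw_create_cq() */
}
#endif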

int c4iw_resize_cq(struct ibv_cq *ibcq, int cqe)
{
#if 0
	int ret;

	struct ibv_resize_cq cmd;
	struct ibv_resize_cq_resp resp;
	ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd, &resp, sizeof resp);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
#else
	/* Return a positive errno, consistent with the other
	 * unimplemented verbs in this file. */
	return ENOSYS;
#endif
}

int c4iw_destroy_cq(struct ibv_cq *ibcq)
{
	int ret;
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_dev *dev = to_c4iw_dev(ibcq->context->device);

	chp->cq.error = 1;
	ret = ibv_cmd_destroy_cq(ibcq);
	if (ret) {
		return ret;
	}
	munmap(MASKED(chp->cq.ugts), c4iw_page_size);
	munmap(chp->cq.queue, chp->cq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->cqid2ptr[chp->cq.cqid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(chp->cq.sw_queue);
	free(chp);
	return 0;
}

struct ibv_srq *c4iw_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr)
{
	return NULL;
}

int c4iw_modify_srq(struct ibv_srq *srq, struct ibv_srq_attr *attr,
		    int attr_mask)
{
	return ENOSYS;
}

int c4iw_destroy_srq(struct ibv_srq *srq)
{
	return ENOSYS;
}

int c4iw_post_srq_recv(struct ibv_srq *ibsrq, struct ibv_recv_wr *wr,
		       struct ibv_recv_wr **bad_wr)
{
	return ENOSYS;
}

static struct ibv_qp *create_qp_v0(struct ibv_pd *pd,
				   struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp_v0 resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = 0;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err3;

	qhp->wq.sq.udb = dbva;
	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err4;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

static struct ibv_qp *create_qp(struct ibv_pd *pd,
				struct ibv_qp_init_attr *attr)
{
	struct ibv_create_qp cmd;
	struct c4iw_create_qp_resp resp;
	struct c4iw_qp *qhp;
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);
	struct c4iw_context *ctx = to_c4iw_context(pd->context);
	int ret;
	void *dbva;

	PDBG("%s enter qp\n", __func__);
	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		goto err1;

	ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
				sizeof cmd, &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err2;

	PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64
	       " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64
	       " qid_mask 0x%x\n",
		__func__,
		resp.sqid, resp.sq_key, resp.sq_db_gts_key,
		resp.rqid, resp.rq_key, resp.rq_db_gts_key, resp.qid_mask);

	qhp->wq.qid_mask = resp.qid_mask;
	qhp->rhp = dev;
	qhp->wq.sq.qid = resp.sqid;
	qhp->wq.sq.size = resp.sq_size;
	qhp->wq.sq.memsize = resp.sq_memsize;
	qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0;
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.msn = 1;
	qhp->wq.rq.qid = resp.rqid;
	qhp->wq.rq.size = resp.rq_size;
	qhp->wq.rq.memsize = resp.rq_memsize;
	if (ma_wr && resp.sq_memsize < (resp.sq_size + 1) *
	    sizeof *qhp->wq.sq.queue + 16 * sizeof(__be64)) {
		ma_wr = 0;
		fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. "
			"MA workaround disabled.\n");
	}
	pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.sq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err3;
	qhp->wq.sq.udb = dbva;
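	/*
	 * On T5 and later, each queue's doorbell lives in a 128-byte
	 * BAR2 segment.  When the segment falls inside the mapped page,
	 * the write-combining user doorbell can be used directly;
	 * otherwise doorbell writes must carry the queue id explicitly
	 * via bar2_qid.
	 */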
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.sq.qid &
						      qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.sq.udb += segment_offset / 4;
			qhp->wq.sq.wc_reg_available = 1;
		} else
			qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask;
		qhp->wq.sq.udb += 2;
	}

	qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.sq_key);
	if (qhp->wq.sq.queue == MAP_FAILED)
		goto err4;

	dbva = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED,
		    pd->context->cmd_fd, resp.rq_db_gts_key);
	if (dbva == MAP_FAILED)
		goto err5;
	qhp->wq.rq.udb = dbva;
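	/* Same BAR2 segment arithmetic as for the SQ above. */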
	if (!dev_is_t4(qhp->rhp)) {
		unsigned long segment_offset = 128 * (qhp->wq.rq.qid &
						      qhp->wq.qid_mask);

		if (segment_offset < c4iw_page_size) {
			qhp->wq.rq.udb += segment_offset / 4;
			qhp->wq.rq.wc_reg_available = 1;
		} else
			qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask;
		qhp->wq.rq.udb += 2;
	}
	qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
			    PROT_READ|PROT_WRITE, MAP_SHARED,
			    pd->context->cmd_fd, resp.rq_key);
	if (qhp->wq.rq.queue == MAP_FAILED)
		goto err6;

	qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
	if (!qhp->wq.sq.sw_sq)
		goto err7;

	qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
	if (!qhp->wq.rq.sw_rq)
		goto err8;

	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
					  MAP_SHARED, pd->context->cmd_fd,
					  resp.ma_sync_key);
		if (qhp->wq.sq.ma_sync == MAP_FAILED)
			goto err9;
		qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
	}

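	/*
	 * db_offp points at the flag the kernel raises when user
	 * doorbell writes must be paused (doorbell-drop recovery).
	 * Newer drivers export it in a shared status page; older ones
	 * keep it in the status area at the end of the RQ memory.
	 */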
	if (ctx->status_page_size) {
		qhp->wq.db_offp = &ctx->status_page->db_off;
	} else {
		qhp->wq.db_offp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off;
	}

	PDBG("%s sq dbva %p sq qva %p sq depth %u sq memsize %lu "
	       " rq dbva %p rq qva %p rq depth %u rq memsize %lu\n",
	     __func__,
	     qhp->wq.sq.udb, qhp->wq.sq.queue,
	     qhp->wq.sq.size, qhp->wq.sq.memsize,
	     qhp->wq.rq.udb, qhp->wq.rq.queue,
	     qhp->wq.rq.size, qhp->wq.rq.memsize);

	qhp->sq_sig_all = attr->sq_sig_all;

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
	pthread_spin_unlock(&dev->lock);
	INC_STAT(qp);
	return &qhp->ibv_qp;
err9:
	free(qhp->wq.rq.sw_rq);
err8:
	free(qhp->wq.sq.sw_sq);
err7:
	munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
err6:
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
err5:
	munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
err4:
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
err3:
	(void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
err2:
	free(qhp);
err1:
	return NULL;
}

struct ibv_qp *c4iw_create_qp(struct ibv_pd *pd,
			      struct ibv_qp_init_attr *attr)
{
	struct c4iw_dev *dev = to_c4iw_dev(pd->context->device);

	if (dev->abi_version == 0)
		return create_qp_v0(pd, attr);
	return create_qp(pd, attr);
}
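
/*
 * Illustrative sketch, not part of the provider: iWARP QPs are created
 * as RC QPs through the generic API; c4iw_create_qp() above then picks
 * the response layout matching the kernel driver's ABI version.  The
 * helper name and capacities are hypothetical.
 */
#if 0
#include <string.h>
#include <infiniband/verbs.h>

static struct ibv_qp *qp_example(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr;

	memset(&attr, 0, sizeof attr);
	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.cap.max_send_wr = 64;
	attr.cap.max_recv_wr = 64;
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;
	attr.qp_type = IBV_QPT_RC;
	return ibv_create_qp(pd, &attr);	/* -> c4iw_create_qp() */
}
#endif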

static void reset_qp(struct c4iw_qp *qhp)
{
	PDBG("%s enter qp %p\n", __func__, qhp);
	qhp->wq.sq.cidx = 0;
	qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0;
	qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
	qhp->wq.sq.oldest_read = NULL;
	memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
	if (t4_sq_onchip(&qhp->wq))
		mmio_flush_writes();
	memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
}

int c4iw_modify_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		   int attr_mask)
{
	struct ibv_modify_qp cmd = {};
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	PDBG("%s enter qp %p new state %d\n", __func__, ibqp,
	     attr_mask & IBV_QP_STATE ? attr->qp_state : -1);
	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd);
	if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET)
		reset_qp(qhp);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_destroy_qp(struct ibv_qp *ibqp)
{
	int ret;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device);

	PDBG("%s enter qp %p\n", __func__, ibqp);
	pthread_spin_lock(&qhp->lock);
	c4iw_flush_qp(qhp);
	pthread_spin_unlock(&qhp->lock);

	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		return ret;
	}
	if (t4_sq_onchip(&qhp->wq)) {
		qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
		munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size);
	}
	munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
	munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
	munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize);
	munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize);

	pthread_spin_lock(&dev->lock);
	dev->qpid2ptr[qhp->wq.sq.qid] = NULL;
	pthread_spin_unlock(&dev->lock);

	free(qhp->wq.rq.sw_rq);
	free(qhp->wq.sq.sw_sq);
	free(qhp);
	return 0;
}

int c4iw_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		  int attr_mask, struct ibv_qp_init_attr *init_attr)
{
	struct ibv_query_qp cmd;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

struct ibv_ah *c4iw_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	return NULL;
}

int c4iw_destroy_ah(struct ibv_ah *ah)
{
	return ENOSYS;
}

int c4iw_attach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_attach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_detach_mcast(struct ibv_qp *ibqp, const union ibv_gid *gid,
		      uint16_t lid)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	int ret;

	pthread_spin_lock(&qhp->lock);
	if (t4_wq_in_error(&qhp->wq))
		c4iw_flush_qp(qhp);
	ret = ibv_cmd_detach_mcast(ibqp, gid, lid);
	pthread_spin_unlock(&qhp->lock);
	return ret;
}

void c4iw_async_event(struct ibv_async_event *event)
{
	PDBG("%s type %d obj %p\n", __func__, event->event_type,
	     event->element.cq);

	switch (event->event_type) {
	case IBV_EVENT_CQ_ERR:
		break;
	case IBV_EVENT_QP_FATAL:
	case IBV_EVENT_QP_REQ_ERR:
	case IBV_EVENT_QP_ACCESS_ERR:
	case IBV_EVENT_PATH_MIG_ERR: {
		struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);

		pthread_spin_lock(&qhp->lock);
		c4iw_flush_qp(qhp);
		pthread_spin_unlock(&qhp->lock);
		break;
	}
	case IBV_EVENT_SQ_DRAINED:
	case IBV_EVENT_PATH_MIG:
	case IBV_EVENT_COMM_EST:
	case IBV_EVENT_QP_LAST_WQE_REACHED:
	default:
		break;
	}
}