Lines Matching +full:cmd +full:- +full:db

2  * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
44 #include "cxgb4-abi.h"
50 struct ibv_query_device cmd; in c4iw_query_device() local
55 ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, in c4iw_query_device()
56 sizeof cmd); in c4iw_query_device()
65 snprintf(attr->fw_ver, sizeof attr->fw_ver, in c4iw_query_device()
74 struct ibv_query_port cmd; in c4iw_query_port() local
76 return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd); in c4iw_query_port()
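
The c4iw_query_device()/c4iw_query_port() lines above show the usual thin-wrapper pattern: stack-allocate the libibverbs command buffer, let ibv_cmd_*() marshal it to the kernel, then post-process the response. A minimal sketch of that shape, assuming the legacy ibv_cmd_query_device() signature used in this file; the 8.8.8.8 firmware-version split below is illustrative, not the driver's exact field layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <infiniband/driver.h>

    static int example_query_device(struct ibv_context *context,
                                    struct ibv_device_attr *attr)
    {
            struct ibv_query_device cmd;
            uint64_t raw_fw_ver;
            int ret;

            /* libibverbs fills attr and raw_fw_ver from the kernel reply. */
            ret = ibv_cmd_query_device(context, attr, &raw_fw_ver,
                                       &cmd, sizeof(cmd));
            if (ret)
                    return ret;

            /* Render the opaque 64-bit firmware version for humans;
             * the byte split here is an assumption for illustration. */
            snprintf(attr->fw_ver, sizeof(attr->fw_ver), "%u.%u.%u.%u",
                     (unsigned)((raw_fw_ver >> 24) & 0xff),
                     (unsigned)((raw_fw_ver >> 16) & 0xff),
                     (unsigned)((raw_fw_ver >> 8) & 0xff),
                     (unsigned)(raw_fw_ver & 0xff));
            return 0;
    }
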
81 struct ibv_alloc_pd cmd; in c4iw_alloc_pd() local
89 if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd, in c4iw_alloc_pd()
95 return &pd->ibv_pd; in c4iw_alloc_pd()
115 struct ibv_reg_mr cmd; in __c4iw_reg_mr() local
117 struct c4iw_dev *dev = to_c4iw_dev(pd->context->device); in __c4iw_reg_mr()
124 access, &mhp->ibv_mr, &cmd, sizeof cmd, in __c4iw_reg_mr()
130 mhp->va_fbo = hca_va; in __c4iw_reg_mr()
131 mhp->len = length; in __c4iw_reg_mr()
134 __func__, mhp->ibv_mr.rkey, mhp->va_fbo, mhp->len); in __c4iw_reg_mr()
136 pthread_spin_lock(&dev->lock); in __c4iw_reg_mr()
137 dev->mmid2ptr[c4iw_mmid(mhp->ibv_mr.lkey)] = mhp; in __c4iw_reg_mr()
138 pthread_spin_unlock(&dev->lock); in __c4iw_reg_mr()
140 return &mhp->ibv_mr; in __c4iw_reg_mr()
153 struct c4iw_dev *dev = to_c4iw_dev(mr->pd->context->device); in c4iw_dereg_mr()
159 pthread_spin_lock(&dev->lock); in c4iw_dereg_mr()
160 dev->mmid2ptr[c4iw_mmid(mr->lkey)] = NULL; in c4iw_dereg_mr()
161 pthread_spin_unlock(&dev->lock); in c4iw_dereg_mr()
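
The reg/dereg lines above publish each MR in a device-wide mmid-to-pointer table under a spinlock, so the poll path can resolve a completion's key back to the user object without a syscall. A self-contained sketch of that bookkeeping; the example_dev/EX_MAX_MMID/ex_mmid names and the index derivation are hypothetical stand-ins for the driver's c4iw_mmid():

    #include <pthread.h>
    #include <stdint.h>

    #define EX_MAX_MMID (1 << 16)

    struct example_dev {
            pthread_spinlock_t lock;
            void *mmid2ptr[EX_MAX_MMID];
    };

    /* Derive a table index from the key's upper bits; a plain
     * shift-and-mask stands in for the real derivation here. */
    static inline uint32_t ex_mmid(uint32_t lkey)
    {
            return (lkey >> 8) & (EX_MAX_MMID - 1);
    }

    static void ex_publish_mr(struct example_dev *dev, uint32_t lkey,
                              void *mhp)
    {
            pthread_spin_lock(&dev->lock);
            dev->mmid2ptr[ex_mmid(lkey)] = mhp;   /* register */
            pthread_spin_unlock(&dev->lock);
    }

    static void ex_unpublish_mr(struct example_dev *dev, uint32_t lkey)
    {
            pthread_spin_lock(&dev->lock);
            dev->mmid2ptr[ex_mmid(lkey)] = NULL;  /* deregister */
            pthread_spin_unlock(&dev->lock);
    }
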
171 struct ibv_create_cq cmd; in c4iw_create_cq() local
174 struct c4iw_dev *dev = to_c4iw_dev(context->device); in c4iw_create_cq()
184 &chp->ibv_cq, &cmd, sizeof cmd, in c4iw_create_cq()
193 ret = pthread_spin_init(&chp->lock, PTHREAD_PROCESS_PRIVATE); in c4iw_create_cq()
197 gettimeofday(&chp->time, NULL); in c4iw_create_cq()
199 chp->rhp = dev; in c4iw_create_cq()
200 chp->cq.qid_mask = resp.qid_mask; in c4iw_create_cq()
201 chp->cq.cqid = resp.cqid; in c4iw_create_cq()
202 chp->cq.size = resp.size; in c4iw_create_cq()
203 chp->cq.memsize = resp.memsize; in c4iw_create_cq()
204 chp->cq.gen = 1; in c4iw_create_cq()
205 chp->cq.queue = mmap(NULL, chp->cq.memsize, PROT_READ|PROT_WRITE, in c4iw_create_cq()
206 MAP_SHARED, context->cmd_fd, resp.key); in c4iw_create_cq()
207 if (chp->cq.queue == MAP_FAILED) in c4iw_create_cq()
210 chp->cq.ugts = mmap(NULL, c4iw_page_size, PROT_WRITE, MAP_SHARED, in c4iw_create_cq()
211 context->cmd_fd, resp.gts_key); in c4iw_create_cq()
212 if (chp->cq.ugts == MAP_FAILED) in c4iw_create_cq()
215 if (dev_is_t4(chp->rhp)) in c4iw_create_cq()
216 chp->cq.ugts += 1; in c4iw_create_cq()
218 chp->cq.ugts += 5; in c4iw_create_cq()
219 chp->cq.sw_queue = calloc(chp->cq.size, sizeof *chp->cq.queue); in c4iw_create_cq()
220 if (!chp->cq.sw_queue) in c4iw_create_cq()
225 __func__, chp->cq.cqid, resp.key, chp->cq.queue, in c4iw_create_cq()
226 chp->cq.memsize, resp.gts_key, chp->cq.ugts, chp->cq.qid_mask); in c4iw_create_cq()
228 pthread_spin_lock(&dev->lock); in c4iw_create_cq()
229 dev->cqid2ptr[chp->cq.cqid] = chp; in c4iw_create_cq()
230 pthread_spin_unlock(&dev->lock); in c4iw_create_cq()
232 return &chp->ibv_cq; in c4iw_create_cq()
234 munmap(MASKED(chp->cq.ugts), c4iw_page_size); in c4iw_create_cq()
236 munmap(chp->cq.queue, chp->cq.memsize); in c4iw_create_cq()
238 pthread_spin_destroy(&chp->lock); in c4iw_create_cq()
240 (void)ibv_cmd_destroy_cq(&chp->ibv_cq); in c4iw_create_cq()
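
The create-CQ lines above map two regions through the verbs command fd: the CQ ring itself (resp.key) and the doorbell/GTS page (resp.gts_key). The "offset" handed to mmap() is an opaque cookie the kernel translates, not a file offset. A sketch of that step, with a hypothetical response layout:

    #include <sys/mman.h>
    #include <stdint.h>
    #include <errno.h>

    struct ex_create_cq_resp {
            uint64_t key;       /* mmap cookie for the CQ ring */
            uint64_t gts_key;   /* mmap cookie for the doorbell page */
            uint64_t memsize;   /* ring size in bytes */
    };

    static int ex_map_cq(int cmd_fd, const struct ex_create_cq_resp *resp,
                         size_t page_size, void **queue, void **ugts)
    {
            *queue = mmap(NULL, resp->memsize, PROT_READ | PROT_WRITE,
                          MAP_SHARED, cmd_fd, (off_t)resp->key);
            if (*queue == MAP_FAILED)
                    return -errno;

            /* The doorbell page is write-only from userspace. */
            *ugts = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
                         cmd_fd, (off_t)resp->gts_key);
            if (*ugts == MAP_FAILED) {
                    munmap(*queue, resp->memsize);
                    return -errno;
            }
            return 0;
    }

The T4-versus-later "+= 1"/"+= 5" adjustment in the listing then points chp->cq.ugts at the correct GTS register within that page for the hardware generation.
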
251 struct ibv_resize_cq cmd; in c4iw_resize_cq()
253 ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd, &resp, sizeof resp); in c4iw_resize_cq()
257 return -ENOSYS; in c4iw_resize_cq()
265 struct c4iw_dev *dev = to_c4iw_dev(ibcq->context->device); in c4iw_destroy_cq()
267 chp->cq.error = 1; in c4iw_destroy_cq()
273 munmap(MASKED(chp->cq.ugts), c4iw_page_size); in c4iw_destroy_cq()
274 munmap(chp->cq.queue, chp->cq.memsize); in c4iw_destroy_cq()
276 pthread_spin_lock(&dev->lock); in c4iw_destroy_cq()
277 dev->cqid2ptr[chp->cq.cqid] = NULL; in c4iw_destroy_cq()
278 pthread_spin_unlock(&dev->lock); in c4iw_destroy_cq()
280 free(chp->cq.sw_queue); in c4iw_destroy_cq()
281 pthread_spin_destroy(&chp->lock); in c4iw_destroy_cq()
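
The destroy path above runs roughly in reverse order of setup, with one notable first step: the CQ is flagged in-error before anything is unmapped, presumably so a concurrent poller backs off instead of touching a dying ring. A condensed sketch under that assumption; the ex_* names are hypothetical:

    #include <sys/mman.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct ex_cq {
            int error;
            void *queue, *ugts;
            size_t memsize, page_size;
            void *sw_queue;
    };

    static void ex_destroy_cq(struct ex_cq *cq,
                              pthread_spinlock_t *dev_lock,
                              void **cqid2ptr_slot)
    {
            cq->error = 1;                    /* stop the poll path first */

            munmap(cq->ugts, cq->page_size);  /* release hw mappings */
            munmap(cq->queue, cq->memsize);

            pthread_spin_lock(dev_lock);      /* unpublish the CQ id */
            *cqid2ptr_slot = NULL;
            pthread_spin_unlock(dev_lock);

            free(cq->sw_queue);               /* finally, host memory */
    }
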
312 struct ibv_create_qp cmd; in create_qp_v0() local
315 struct c4iw_dev *dev = to_c4iw_dev(pd->context->device); in create_qp_v0()
324 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd, in create_qp_v0()
325 sizeof cmd, &resp.ibv_resp, sizeof resp); in create_qp_v0()
329 PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64 in create_qp_v0()
330 " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64 in create_qp_v0()
336 qhp->wq.qid_mask = resp.qid_mask; in create_qp_v0()
337 qhp->rhp = dev; in create_qp_v0()
338 qhp->wq.sq.qid = resp.sqid; in create_qp_v0()
339 qhp->wq.sq.size = resp.sq_size; in create_qp_v0()
340 qhp->wq.sq.memsize = resp.sq_memsize; in create_qp_v0()
341 qhp->wq.sq.flags = 0; in create_qp_v0()
342 qhp->wq.rq.msn = 1; in create_qp_v0()
343 qhp->wq.rq.qid = resp.rqid; in create_qp_v0()
344 qhp->wq.rq.size = resp.rq_size; in create_qp_v0()
345 qhp->wq.rq.memsize = resp.rq_memsize; in create_qp_v0()
346 ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE); in create_qp_v0()
351 pd->context->cmd_fd, resp.sq_db_gts_key); in create_qp_v0()
355 qhp->wq.sq.udb = dbva; in create_qp_v0()
356 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize, in create_qp_v0()
358 pd->context->cmd_fd, resp.sq_key); in create_qp_v0()
359 if (qhp->wq.sq.queue == MAP_FAILED) in create_qp_v0()
363 pd->context->cmd_fd, resp.rq_db_gts_key); in create_qp_v0()
366 qhp->wq.rq.udb = dbva; in create_qp_v0()
367 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize, in create_qp_v0()
369 pd->context->cmd_fd, resp.rq_key); in create_qp_v0()
370 if (qhp->wq.rq.queue == MAP_FAILED) in create_qp_v0()
373 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe)); in create_qp_v0()
374 if (!qhp->wq.sq.sw_sq) in create_qp_v0()
377 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t)); in create_qp_v0()
378 if (!qhp->wq.rq.sw_rq) in create_qp_v0()
384 qhp->wq.sq.udb, qhp->wq.sq.queue, in create_qp_v0()
385 qhp->wq.sq.size, qhp->wq.sq.memsize, in create_qp_v0()
386 qhp->wq.rq.udb, qhp->wq.rq.queue, in create_qp_v0()
387 qhp->wq.rq.size, qhp->wq.rq.memsize); in create_qp_v0()
389 qhp->sq_sig_all = attr->sq_sig_all; in create_qp_v0()
391 pthread_spin_lock(&dev->lock); in create_qp_v0()
392 dev->qpid2ptr[qhp->wq.sq.qid] = qhp; in create_qp_v0()
393 pthread_spin_unlock(&dev->lock); in create_qp_v0()
395 return &qhp->ibv_qp; in create_qp_v0()
397 free(qhp->wq.sq.sw_sq); in create_qp_v0()
399 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize); in create_qp_v0()
401 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in create_qp_v0()
403 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize); in create_qp_v0()
405 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in create_qp_v0()
407 pthread_spin_destroy(&qhp->lock); in create_qp_v0()
409 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp); in create_qp_v0()
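
create_qp_v0()'s error labels follow the classic goto-ladder: each setup step that can fail jumps to a label that unwinds only the steps already completed, in reverse order. The shape, reduced to stubs (the ex_* steps are placeholders, not driver functions):

    /* Hypothetical setup steps; each returns 0 on success. */
    static int  ex_create_kernel_qp(void)  { return 0; }
    static int  ex_map_doorbell(void)      { return 0; }
    static int  ex_map_ring(void)          { return 0; }
    static void ex_unmap_doorbell(void)    { }
    static void ex_destroy_kernel_qp(void) { }

    static int ex_create(void)
    {
            if (ex_create_kernel_qp())
                    return -1;
            if (ex_map_doorbell())
                    goto err_destroy;
            if (ex_map_ring())
                    goto err_unmap_db;
            return 0;

    err_unmap_db:
            ex_unmap_doorbell();
    err_destroy:
            ex_destroy_kernel_qp();
            return -1;
    }
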
419 struct ibv_create_qp cmd; in create_qp() local
422 struct c4iw_dev *dev = to_c4iw_dev(pd->context->device); in create_qp()
423 struct c4iw_context *ctx = to_c4iw_context(pd->context); in create_qp()
432 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd, in create_qp()
433 sizeof cmd, &resp.ibv_resp, sizeof resp); in create_qp()
437 PDBG("%s sqid 0x%x sq key %" PRIx64 " sq db/gts key %" PRIx64 in create_qp()
438 " rqid 0x%x rq key %" PRIx64 " rq db/gts key %" PRIx64 in create_qp()
444 qhp->wq.qid_mask = resp.qid_mask; in create_qp()
445 qhp->rhp = dev; in create_qp()
446 qhp->wq.sq.qid = resp.sqid; in create_qp()
447 qhp->wq.sq.size = resp.sq_size; in create_qp()
448 qhp->wq.sq.memsize = resp.sq_memsize; in create_qp()
449 qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0; in create_qp()
450 qhp->wq.sq.flush_cidx = -1; in create_qp()
451 qhp->wq.rq.msn = 1; in create_qp()
452 qhp->wq.rq.qid = resp.rqid; in create_qp()
453 qhp->wq.rq.size = resp.rq_size; in create_qp()
454 qhp->wq.rq.memsize = resp.rq_memsize; in create_qp()
456 sizeof *qhp->wq.sq.queue + 16*sizeof(__be64) ) { in create_qp()
458 fprintf(stderr, "libcxgb4 warning - downlevel iw_cxgb4 driver. " in create_qp()
461 ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE); in create_qp()
466 pd->context->cmd_fd, resp.sq_db_gts_key); in create_qp()
469 qhp->wq.sq.udb = dbva; in create_qp()
470 if (!dev_is_t4(qhp->rhp)) { in create_qp()
471 unsigned long segment_offset = 128 * (qhp->wq.sq.qid & in create_qp()
472 qhp->wq.qid_mask); in create_qp()
475 qhp->wq.sq.udb += segment_offset / 4; in create_qp()
476 qhp->wq.sq.wc_reg_available = 1; in create_qp()
478 qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask; in create_qp()
479 qhp->wq.sq.udb += 2; in create_qp()
482 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize, in create_qp()
484 pd->context->cmd_fd, resp.sq_key); in create_qp()
485 if (qhp->wq.sq.queue == MAP_FAILED) in create_qp()
489 pd->context->cmd_fd, resp.rq_db_gts_key); in create_qp()
492 qhp->wq.rq.udb = dbva; in create_qp()
493 if (!dev_is_t4(qhp->rhp)) { in create_qp()
494 unsigned long segment_offset = 128 * (qhp->wq.rq.qid & in create_qp()
495 qhp->wq.qid_mask); in create_qp()
498 qhp->wq.rq.udb += segment_offset / 4; in create_qp()
499 qhp->wq.rq.wc_reg_available = 1; in create_qp()
501 qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask; in create_qp()
502 qhp->wq.rq.udb += 2; in create_qp()
504 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize, in create_qp()
506 pd->context->cmd_fd, resp.rq_key); in create_qp()
507 if (qhp->wq.rq.queue == MAP_FAILED) in create_qp()
510 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe)); in create_qp()
511 if (!qhp->wq.sq.sw_sq) in create_qp()
514 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t)); in create_qp()
515 if (!qhp->wq.rq.sw_rq) in create_qp()
518 if (t4_sq_onchip(&qhp->wq)) { in create_qp()
519 qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE, in create_qp()
520 MAP_SHARED, pd->context->cmd_fd, in create_qp()
522 if (qhp->wq.sq.ma_sync == MAP_FAILED) in create_qp()
524 qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1)); in create_qp()
527 if (ctx->status_page_size) { in create_qp()
528 qhp->wq.db_offp = &ctx->status_page->db_off; in create_qp()
530 qhp->wq.db_offp = in create_qp()
531 &qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off; in create_qp()
537 qhp->wq.sq.udb, qhp->wq.sq.queue, in create_qp()
538 qhp->wq.sq.size, qhp->wq.sq.memsize, in create_qp()
539 qhp->wq.rq.udb, qhp->wq.rq.queue, in create_qp()
540 qhp->wq.rq.size, qhp->wq.rq.memsize); in create_qp()
542 qhp->sq_sig_all = attr->sq_sig_all; in create_qp()
544 pthread_spin_lock(&dev->lock); in create_qp()
545 dev->qpid2ptr[qhp->wq.sq.qid] = qhp; in create_qp()
546 pthread_spin_unlock(&dev->lock); in create_qp()
548 return &qhp->ibv_qp; in create_qp()
550 free(qhp->wq.rq.sw_rq); in create_qp()
552 free(qhp->wq.sq.sw_sq); in create_qp()
554 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize); in create_qp()
556 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in create_qp()
558 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize); in create_qp()
560 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in create_qp()
562 pthread_spin_destroy(&qhp->lock); in create_qp()
564 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp); in create_qp()
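
The main difference between create_qp() and the v0 path is the doorbell pointer math for T5 and later: each queue owns a 128-byte segment in the user-doorbell/BAR2 area, selected by (qid & qid_mask). A sketch of that selection, assuming udb is a 32-bit word pointer into the mmap'ed doorbell page and that segments beyond the mapped page fall back to a qid-tagged write through the shared register (the lines elided from the listing presumably make that choice); the "+= 2" skips to the kick register within the segment:

    #include <stdint.h>

    struct ex_db {
            volatile uint32_t *udb;  /* register to write the doorbell to */
            uint32_t bar2_qid;       /* qid tag for the fallback register */
            int wc_reg_available;    /* write-combining window usable */
    };

    static struct ex_db ex_pick_db(volatile uint32_t *udb_page,
                                   uint32_t qid, uint32_t qid_mask,
                                   unsigned long page_size)
    {
            struct ex_db db = { .udb = udb_page };
            unsigned long segment_offset = 128UL * (qid & qid_mask);

            if (segment_offset < page_size) {
                    /* Segment lies inside our mapping: address it
                     * directly; write-combined copies are possible. */
                    db.udb += segment_offset / 4;  /* bytes -> words */
                    db.wc_reg_available = 1;
            } else {
                    /* Out of range: ring the shared register and
                     * identify the queue by qid instead. */
                    db.bar2_qid = qid & qid_mask;
            }
            db.udb += 2;  /* step to the kick register in the segment */
            return db;
    }
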
574 struct c4iw_dev *dev = to_c4iw_dev(pd->context->device); in c4iw_create_qp()
576 if (dev->abi_version == 0) in c4iw_create_qp()
584 qhp->wq.sq.cidx = 0; in reset_qp()
585 qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0; in reset_qp()
586 qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0; in reset_qp()
587 qhp->wq.sq.oldest_read = NULL; in reset_qp()
588 memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize); in reset_qp()
589 if (t4_sq_onchip(&qhp->wq)) in reset_qp()
591 memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize); in reset_qp()
597 struct ibv_modify_qp cmd = {}; in c4iw_modify_qp() local
601 PDBG("%s enter qp %p new state %d\n", __func__, ibqp, attr_mask & IBV_QP_STATE ? attr->qp_state : -1); in c4iw_modify_qp()
602 pthread_spin_lock(&qhp->lock); in c4iw_modify_qp()
603 if (t4_wq_in_error(&qhp->wq)) in c4iw_modify_qp()
605 ret = ibv_cmd_modify_qp(ibqp, attr, attr_mask, &cmd, sizeof cmd); in c4iw_modify_qp()
606 if (!ret && (attr_mask & IBV_QP_STATE) && attr->qp_state == IBV_QPS_RESET) in c4iw_modify_qp()
608 pthread_spin_unlock(&qhp->lock); in c4iw_modify_qp()
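
c4iw_modify_qp() takes the QP spinlock around the kernel command so the software queue state can be reset atomically with a transition to RESET, and so posting paths serialized on the same lock cannot interleave. A reduced sketch of that discipline; the ex_* types and helpers are hypothetical:

    #include <pthread.h>

    struct ex_qp {
            pthread_spinlock_t lock;
            /* ... ring indices, software queues ... */
    };

    /* Stub for zeroing indices and rings, as reset_qp() does above. */
    static void ex_reset_sw_state(struct ex_qp *qp) { (void)qp; }

    /* Stub for the ibv_cmd_modify_qp() call into the kernel. */
    static int ex_kernel_modify(struct ex_qp *qp, int new_state)
    {
            (void)qp; (void)new_state;
            return 0;
    }

    static int ex_modify_qp(struct ex_qp *qp, int new_state, int to_reset)
    {
            int ret;

            pthread_spin_lock(&qp->lock);
            ret = ex_kernel_modify(qp, new_state);
            if (!ret && to_reset)
                    ex_reset_sw_state(qp);  /* only on move to RESET */
            pthread_spin_unlock(&qp->lock);
            return ret;
    }
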
616 struct c4iw_dev *dev = to_c4iw_dev(ibqp->context->device); in c4iw_destroy_qp()
619 pthread_spin_lock(&qhp->lock); in c4iw_destroy_qp()
621 pthread_spin_unlock(&qhp->lock); in c4iw_destroy_qp()
627 if (t4_sq_onchip(&qhp->wq)) { in c4iw_destroy_qp()
628 qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1)); in c4iw_destroy_qp()
629 munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size); in c4iw_destroy_qp()
631 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in c4iw_destroy_qp()
632 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in c4iw_destroy_qp()
633 munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize); in c4iw_destroy_qp()
634 munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize); in c4iw_destroy_qp()
636 pthread_spin_lock(&dev->lock); in c4iw_destroy_qp()
637 dev->qpid2ptr[qhp->wq.sq.qid] = NULL; in c4iw_destroy_qp()
638 pthread_spin_unlock(&dev->lock); in c4iw_destroy_qp()
640 free(qhp->wq.rq.sw_rq); in c4iw_destroy_qp()
641 free(qhp->wq.sq.sw_sq); in c4iw_destroy_qp()
642 pthread_spin_destroy(&qhp->lock); in c4iw_destroy_qp()
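
The ma_sync arithmetic in the on-chip-SQ paths above is the standard trick for mapping a single device register: mmap() the page that contains it, then add back the register's offset within that page; teardown subtracts the same offset before munmap(). A sketch of the mapping half, with EX_MA_SYNC_REG as an illustrative stand-in for A_PCIE_MA_SYNC (the real value is hardware-defined):

    #include <sys/mman.h>
    #include <stdint.h>

    #define EX_MA_SYNC_REG 0x30b8u  /* illustrative register address */

    static volatile uint8_t *ex_map_ma_sync(int cmd_fd, uint64_t ma_sync_key,
                                            unsigned long page_size)
    {
            void *page = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
                              cmd_fd, (off_t)ma_sync_key);
            if (page == MAP_FAILED)
                    return NULL;

            /* Step from the page base to the register itself. */
            return (volatile uint8_t *)page +
                   (EX_MA_SYNC_REG & (page_size - 1));
    }
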
650 struct ibv_query_qp cmd; in c4iw_query_qp() local
654 pthread_spin_lock(&qhp->lock); in c4iw_query_qp()
655 if (t4_wq_in_error(&qhp->wq)) in c4iw_query_qp()
657 ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd); in c4iw_query_qp()
658 pthread_spin_unlock(&qhp->lock); in c4iw_query_qp()
678 pthread_spin_lock(&qhp->lock); in c4iw_attach_mcast()
679 if (t4_wq_in_error(&qhp->wq)) in c4iw_attach_mcast()
682 pthread_spin_unlock(&qhp->lock); in c4iw_attach_mcast()
692 pthread_spin_lock(&qhp->lock); in c4iw_detach_mcast()
693 if (t4_wq_in_error(&qhp->wq)) in c4iw_detach_mcast()
696 pthread_spin_unlock(&qhp->lock); in c4iw_detach_mcast()
702 PDBG("%s type %d obj %p\n", __func__, event->event_type, in c4iw_async_event()
703 event->element.cq); in c4iw_async_event()
705 switch (event->event_type) { in c4iw_async_event()
712 struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp); in c4iw_async_event()
713 pthread_spin_lock(&qhp->lock); in c4iw_async_event()
715 pthread_spin_unlock(&qhp->lock); in c4iw_async_event()
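
The async-event hook at the end dispatches on event_type: fatal QP events take the QP lock and mark the software queues in error, so later polls can complete outstanding work as flushed. A minimal sketch under that assumption; ex_qp/ex_lookup are hypothetical stubs for the driver's lookup tables:

    #include <infiniband/verbs.h>
    #include <pthread.h>
    #include <stddef.h>

    struct ex_qp {
            pthread_spinlock_t lock;
            int in_error;
    };

    /* Stub: resolve the verbs QP to the provider object (the qpid2ptr
     * table above plays this role in the real driver). */
    static struct ex_qp *ex_lookup(struct ibv_qp *qp)
    {
            (void)qp;
            return NULL;
    }

    static void ex_async_event(struct ibv_async_event *event)
    {
            switch (event->event_type) {
            case IBV_EVENT_QP_FATAL:
            case IBV_EVENT_QP_REQ_ERR:
            case IBV_EVENT_QP_ACCESS_ERR: {
                    struct ex_qp *qhp = ex_lookup(event->element.qp);

                    if (!qhp)
                            break;
                    pthread_spin_lock(&qhp->lock);
                    qhp->in_error = 1;  /* poll path flushes later */
                    pthread_spin_unlock(&qhp->lock);
                    break;
            }
            default:
                    break;
            }
    }
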