Lines Matching refs:qhp

314 struct c4iw_qp *qhp; in create_qp_v0() local
320 qhp = calloc(1, sizeof *qhp); in create_qp_v0()
321 if (!qhp) in create_qp_v0()
324 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd, in create_qp_v0()
336 qhp->wq.qid_mask = resp.qid_mask; in create_qp_v0()
337 qhp->rhp = dev; in create_qp_v0()
338 qhp->wq.sq.qid = resp.sqid; in create_qp_v0()
339 qhp->wq.sq.size = resp.sq_size; in create_qp_v0()
340 qhp->wq.sq.memsize = resp.sq_memsize; in create_qp_v0()
341 qhp->wq.sq.flags = 0; in create_qp_v0()
342 qhp->wq.rq.msn = 1; in create_qp_v0()
343 qhp->wq.rq.qid = resp.rqid; in create_qp_v0()
344 qhp->wq.rq.size = resp.rq_size; in create_qp_v0()
345 qhp->wq.rq.memsize = resp.rq_memsize; in create_qp_v0()
346 ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE); in create_qp_v0()
355 qhp->wq.sq.udb = dbva; in create_qp_v0()
356 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize, in create_qp_v0()
359 if (qhp->wq.sq.queue == MAP_FAILED) in create_qp_v0()
366 qhp->wq.rq.udb = dbva; in create_qp_v0()
367 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize, in create_qp_v0()
370 if (qhp->wq.rq.queue == MAP_FAILED) in create_qp_v0()
373 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe)); in create_qp_v0()
374 if (!qhp->wq.sq.sw_sq) in create_qp_v0()
377 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t)); in create_qp_v0()
378 if (!qhp->wq.rq.sw_rq) in create_qp_v0()
384 qhp->wq.sq.udb, qhp->wq.sq.queue, in create_qp_v0()
385 qhp->wq.sq.size, qhp->wq.sq.memsize, in create_qp_v0()
386 qhp->wq.rq.udb, qhp->wq.rq.queue, in create_qp_v0()
387 qhp->wq.rq.size, qhp->wq.rq.memsize); in create_qp_v0()
389 qhp->sq_sig_all = attr->sq_sig_all; in create_qp_v0()
392 dev->qpid2ptr[qhp->wq.sq.qid] = qhp; in create_qp_v0()
395 return &qhp->ibv_qp; in create_qp_v0()
397 free(qhp->wq.sq.sw_sq); in create_qp_v0()
399 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize); in create_qp_v0()
401 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in create_qp_v0()
403 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize); in create_qp_v0()
405 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in create_qp_v0()
407 pthread_spin_destroy(&qhp->lock); in create_qp_v0()
409 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp); in create_qp_v0()
411 free(qhp); in create_qp_v0()
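
The create_qp_v0() matches above trace one allocation/teardown chain: calloc the qhp, create the QP through ibv_cmd_create_qp(), mmap the doorbell pages and the SQ/RQ queue memory, then calloc the software shadow rings, with the trailing matches (lines 397-411) unwinding those steps in reverse when any later step fails. A minimal compilable sketch of that goto-unwind idiom, using hypothetical names (my_qp, create_qp_sketch) rather than the libcxgb4 ones:

#include <pthread.h>
#include <stdlib.h>

struct my_qp {
	pthread_spinlock_t lock;
	void *sw_sq;		/* software shadow of the send queue */
};

static struct my_qp *create_qp_sketch(size_t sq_entries, size_t entry_size)
{
	struct my_qp *qp = calloc(1, sizeof *qp);

	if (!qp)
		return NULL;
	if (pthread_spin_init(&qp->lock, PTHREAD_PROCESS_PRIVATE))
		goto err_free_qp;
	qp->sw_sq = calloc(sq_entries, entry_size);
	if (!qp->sw_sq)
		goto err_destroy_lock;
	return qp;

err_destroy_lock:
	pthread_spin_destroy(&qp->lock);
err_free_qp:
	free(qp);
	return NULL;
}

Each label undoes exactly the steps that completed before the failing one, which is why the cleanup matches above appear in the reverse order of the setup matches.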
421 struct c4iw_qp *qhp; in create_qp() local
428 qhp = calloc(1, sizeof *qhp); in create_qp()
429 if (!qhp) in create_qp()
432 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd, in create_qp()
444 qhp->wq.qid_mask = resp.qid_mask; in create_qp()
445 qhp->rhp = dev; in create_qp()
446 qhp->wq.sq.qid = resp.sqid; in create_qp()
447 qhp->wq.sq.size = resp.sq_size; in create_qp()
448 qhp->wq.sq.memsize = resp.sq_memsize; in create_qp()
449 qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0; in create_qp()
450 qhp->wq.sq.flush_cidx = -1; in create_qp()
451 qhp->wq.rq.msn = 1; in create_qp()
452 qhp->wq.rq.qid = resp.rqid; in create_qp()
453 qhp->wq.rq.size = resp.rq_size; in create_qp()
454 qhp->wq.rq.memsize = resp.rq_memsize; in create_qp()
456 sizeof *qhp->wq.sq.queue + 16*sizeof(__be64) ) { in create_qp()
461 ret = pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE); in create_qp()
469 qhp->wq.sq.udb = dbva; in create_qp()
470 if (!dev_is_t4(qhp->rhp)) { in create_qp()
471 unsigned long segment_offset = 128 * (qhp->wq.sq.qid & in create_qp()
472 qhp->wq.qid_mask); in create_qp()
475 qhp->wq.sq.udb += segment_offset / 4; in create_qp()
476 qhp->wq.sq.wc_reg_available = 1; in create_qp()
478 qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask; in create_qp()
479 qhp->wq.sq.udb += 2; in create_qp()
482 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize, in create_qp()
485 if (qhp->wq.sq.queue == MAP_FAILED) in create_qp()
492 qhp->wq.rq.udb = dbva; in create_qp()
493 if (!dev_is_t4(qhp->rhp)) { in create_qp()
494 unsigned long segment_offset = 128 * (qhp->wq.rq.qid & in create_qp()
495 qhp->wq.qid_mask); in create_qp()
498 qhp->wq.rq.udb += segment_offset / 4; in create_qp()
499 qhp->wq.rq.wc_reg_available = 1; in create_qp()
501 qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask; in create_qp()
502 qhp->wq.rq.udb += 2; in create_qp()
504 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize, in create_qp()
507 if (qhp->wq.rq.queue == MAP_FAILED) in create_qp()
510 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe)); in create_qp()
511 if (!qhp->wq.sq.sw_sq) in create_qp()
514 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t)); in create_qp()
515 if (!qhp->wq.rq.sw_rq) in create_qp()
518 if (t4_sq_onchip(&qhp->wq)) { in create_qp()
519 qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE, in create_qp()
522 if (qhp->wq.sq.ma_sync == MAP_FAILED) in create_qp()
524 qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1)); in create_qp()
528 qhp->wq.db_offp = &ctx->status_page->db_off; in create_qp()
530 qhp->wq.db_offp = in create_qp()
531 &qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off; in create_qp()
537 qhp->wq.sq.udb, qhp->wq.sq.queue, in create_qp()
538 qhp->wq.sq.size, qhp->wq.sq.memsize, in create_qp()
539 qhp->wq.rq.udb, qhp->wq.rq.queue, in create_qp()
540 qhp->wq.rq.size, qhp->wq.rq.memsize); in create_qp()
542 qhp->sq_sig_all = attr->sq_sig_all; in create_qp()
545 dev->qpid2ptr[qhp->wq.sq.qid] = qhp; in create_qp()
548 return &qhp->ibv_qp; in create_qp()
550 free(qhp->wq.rq.sw_rq); in create_qp()
552 free(qhp->wq.sq.sw_sq); in create_qp()
554 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize); in create_qp()
556 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in create_qp()
558 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize); in create_qp()
560 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in create_qp()
562 pthread_spin_destroy(&qhp->lock); in create_qp()
564 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp); in create_qp()
566 free(qhp); in create_qp()
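
create_qp() differs from the v0 variant mainly in doorbell setup: on non-T4 hardware (lines 470-479 and 493-502) each queue gets a 128-byte segment within a BAR2 page, selected by the low bits of its QID, and udb is advanced into that segment. A hedged sketch of that address math, assuming udb points at 32-bit doorbell words (the division by 4 in the listing suggests 4-byte units); udb_for_qid is a hypothetical helper, not a libcxgb4 function:

#include <stdint.h>

static volatile uint32_t *udb_for_qid(volatile uint32_t *bar2_page,
				      uint32_t qid, uint32_t qid_mask)
{
	/* Each queue owns a 128-byte segment inside the BAR2 page. */
	unsigned long segment_offset = 128 * (qid & qid_mask);

	/* Pointer arithmetic on a u32 * moves in 4-byte steps, so
	 * dividing the byte offset by 4 lands on the segment start. */
	return bar2_page + segment_offset / 4;
}

The listing shows only matching lines, so the surrounding branch structure is not fully visible here; the udb += 2 matches appear to skip two 32-bit words (8 bytes) further to a plain doorbell register when the write-combining register is not used, and the exact register layout within the segment is hardware-specific.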
581 static void reset_qp(struct c4iw_qp *qhp) in reset_qp() argument
583 PDBG("%s enter qp %p\n", __func__, qhp); in reset_qp()
584 qhp->wq.sq.cidx = 0; in reset_qp()
585 qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0; in reset_qp()
586 qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0; in reset_qp()
587 qhp->wq.sq.oldest_read = NULL; in reset_qp()
588 memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize); in reset_qp()
589 if (t4_sq_onchip(&qhp->wq)) in reset_qp()
591 memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize); in reset_qp()
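
reset_qp() returns both rings to their initial state: consumer/producer indices and in-use counts back to zero, the oldest-read pointer cleared, and the queue memory zeroed. A trimmed sketch of the same idea for a generic ring, with hypothetical names:

#include <stddef.h>
#include <string.h>

struct ring {
	unsigned int cidx, pidx, in_use;	/* consumer/producer state */
	void *queue;				/* mapped queue memory */
	size_t memsize;
};

static void ring_reset(struct ring *r)
{
	r->cidx = r->pidx = r->in_use = 0;
	memset(r->queue, 0, r->memsize);
}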
598 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_modify_qp() local
602 pthread_spin_lock(&qhp->lock); in c4iw_modify_qp()
603 if (t4_wq_in_error(&qhp->wq)) in c4iw_modify_qp()
604 c4iw_flush_qp(qhp); in c4iw_modify_qp()
607 reset_qp(qhp); in c4iw_modify_qp()
608 pthread_spin_unlock(&qhp->lock); in c4iw_modify_qp()
615 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_destroy_qp() local
619 pthread_spin_lock(&qhp->lock); in c4iw_destroy_qp()
620 c4iw_flush_qp(qhp); in c4iw_destroy_qp()
621 pthread_spin_unlock(&qhp->lock); in c4iw_destroy_qp()
627 if (t4_sq_onchip(&qhp->wq)) { in c4iw_destroy_qp()
628 qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1)); in c4iw_destroy_qp()
629 munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size); in c4iw_destroy_qp()
631 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size); in c4iw_destroy_qp()
632 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size); in c4iw_destroy_qp()
633 munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize); in c4iw_destroy_qp()
634 munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize); in c4iw_destroy_qp()
637 dev->qpid2ptr[qhp->wq.sq.qid] = NULL; in c4iw_destroy_qp()
640 free(qhp->wq.rq.sw_rq); in c4iw_destroy_qp()
641 free(qhp->wq.sq.sw_sq); in c4iw_destroy_qp()
642 pthread_spin_destroy(&qhp->lock); in c4iw_destroy_qp()
643 free(qhp); in c4iw_destroy_qp()
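
The munmap() calls at lines 631-632 (and in the create-path unwinds) pass the doorbell pointer through MASKED(). Since udb may have been advanced into the middle of its mapped page, and munmap() requires a page-aligned address, the macro masks the pointer back down to the page boundary. A sketch of that masking, assuming the page size is a power of two; unmap_doorbell is a hypothetical wrapper, not the libcxgb4 macro itself:

#include <stdint.h>
#include <sys/mman.h>

static int unmap_doorbell(volatile uint32_t *udb, size_t page_size)
{
	/* Round the (possibly advanced) doorbell pointer back down
	 * to the start of its page, as munmap() requires. */
	void *page = (void *)((uintptr_t)udb & ~((uintptr_t)page_size - 1));

	return munmap(page, page_size);
}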
651 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_query_qp() local
654 pthread_spin_lock(&qhp->lock); in c4iw_query_qp()
655 if (t4_wq_in_error(&qhp->wq)) in c4iw_query_qp()
656 c4iw_flush_qp(qhp); in c4iw_query_qp()
658 pthread_spin_unlock(&qhp->lock); in c4iw_query_qp()
675 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_attach_mcast() local
678 pthread_spin_lock(&qhp->lock); in c4iw_attach_mcast()
679 if (t4_wq_in_error(&qhp->wq)) in c4iw_attach_mcast()
680 c4iw_flush_qp(qhp); in c4iw_attach_mcast()
682 pthread_spin_unlock(&qhp->lock); in c4iw_attach_mcast()
689 struct c4iw_qp *qhp = to_c4iw_qp(ibqp); in c4iw_detach_mcast() local
692 pthread_spin_lock(&qhp->lock); in c4iw_detach_mcast()
693 if (t4_wq_in_error(&qhp->wq)) in c4iw_detach_mcast()
694 c4iw_flush_qp(qhp); in c4iw_detach_mcast()
696 pthread_spin_unlock(&qhp->lock); in c4iw_detach_mcast()
712 struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp); in c4iw_async_event() local
713 pthread_spin_lock(&qhp->lock); in c4iw_async_event()
714 c4iw_flush_qp(qhp); in c4iw_async_event()
715 pthread_spin_unlock(&qhp->lock); in c4iw_async_event()
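
c4iw_modify_qp(), c4iw_query_qp(), the mcast attach/detach paths, and the async-event handler all share one pattern: take the per-QP spinlock, and if the work queue has entered the error state (or, for the async event, unconditionally), flush it before continuing. A compilable sketch of that pattern with stand-in names (qp_stub, flush_stub) for t4_wq_in_error() and c4iw_flush_qp():

#include <pthread.h>
#include <stdbool.h>

struct qp_stub {
	pthread_spinlock_t lock;
	bool in_error;
};

static void flush_stub(struct qp_stub *qp)
{
	/* The real c4iw_flush_qp() completes outstanding work
	 * requests to the CQs; this stub only clears the flag. */
	qp->in_error = false;
}

static void flush_if_in_error(struct qp_stub *qp)
{
	pthread_spin_lock(&qp->lock);
	if (qp->in_error)
		flush_stub(qp);
	pthread_spin_unlock(&qp->lock);
}

Holding the lock across the check-and-flush keeps the flush from racing with threads posting to the same QP.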