Lines Matching +full:cmd +full:- +full:db
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
48 #include "mlx5-abi.h"
60 struct ibv_query_device cmd;
65 ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
73 snprintf(attr->fw_ver, sizeof attr->fw_ver,
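The snprintf above formats a decoded 64-bit raw_fw_ver. A minimal sketch of the decode, assuming the usual field split implied by the "%d.%d.%04d" format (widths are an assumption, not confirmed by the matched lines):

    unsigned int major     = (raw_fw_ver >> 32) & 0xffff;
    unsigned int minor     = (raw_fw_ver >> 16) & 0xffff;
    unsigned int sub_minor = raw_fw_ver & 0xffff;

    snprintf(attr->fw_ver, sizeof(attr->fw_ver),
             "%d.%d.%04d", major, minor, sub_minor);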
86 if (!ctx->hca_core_clock)
87 return -EOPNOTSUPP;
91 clockhi = be32toh(READL(ctx->hca_core_clock));
92 clocklo = be32toh(READL(ctx->hca_core_clock + 4));
93 clockhi1 = be32toh(READL(ctx->hca_core_clock));
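Lines 91-93 are the classic split read of a 64-bit free-running counter through two 32-bit MMIO reads; the retry logic around them is elided by the match. A sketch of how such a read is normally made safe against a low-word wrap (loop shape is an assumption):

    uint32_t clockhi, clocklo, clockhi1;
    uint64_t cycles;

    /* If the high word changed while the low word was read, the counter
     * wrapped between the reads; retry until both high reads agree. */
    do {
            clockhi  = be32toh(READL(ctx->hca_core_clock));
            clocklo  = be32toh(READL(ctx->hca_core_clock + 4));
            clockhi1 = be32toh(READL(ctx->hca_core_clock));
    } while (clockhi != clockhi1);

    cycles = (uint64_t)clockhi << 32 | clocklo;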
109 if (values->comp_mask & IBV_VALUES_MASK_RAW_CLOCK) {
114 values->raw_clock.tv_sec = 0;
115 values->raw_clock.tv_nsec = cycles;
120 values->comp_mask = comp_mask;
128 struct ibv_query_port cmd;
130 return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
135 struct ibv_alloc_pd cmd;
143 if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
149 pd->pdn = resp.pdn;
151 return &pd->ibv_pd;
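The `return &pd->ibv_pd` idiom works because the provider object embeds the verbs object. A minimal sketch of the pattern, assuming a container_of macro is available (the real struct has more fields):

    struct mlx5_pd {
            struct ibv_pd ibv_pd;   /* embedded verbs object, returned to callers */
            uint32_t      pdn;      /* kernel-assigned PD number (resp.pdn above) */
    };

    /* Recover the provider object from the verbs pointer handed back to us. */
    static inline struct mlx5_pd *to_mpd(struct ibv_pd *ibpd)
    {
            return container_of(ibpd, struct mlx5_pd, ibv_pd);
    }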
170 struct ibv_reg_mr cmd;
180 &(mr->ibv_mr), &cmd, sizeof(cmd), &resp,
183 mlx5_free_buf(&(mr->buf));
187 mr->alloc_flags = acc;
189 return &mr->ibv_mr;
195 struct ibv_rereg_mr cmd;
202 access, pd, &cmd, sizeof(cmd), &resp,
222 struct ibv_alloc_mw cmd;
232 ret = ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd), &resp,
245 struct ibv_dealloc_mw cmd;
247 ret = ibv_cmd_dealloc_mw(mw, &cmd, sizeof(cmd));
264 return -ENOMEM;
290 return -EINVAL;
347 struct mlx5_create_cq cmd;
354 FILE *fp = to_mctx(context)->dbg_fp;
356 if (!cq_attr->cqe) {
362 if (cq_attr->comp_mask & ~CREATE_CQ_SUPPORTED_COMP_MASK) {
369 if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS &&
370 cq_attr->flags & ~CREATE_CQ_SUPPORTED_FLAGS) {
377 if (cq_attr->wc_flags & ~CREATE_CQ_SUPPORTED_WC_FLAGS) {
389 memset(&cmd, 0, sizeof cmd);
390 cq->cons_index = 0;
392 if (mlx5_spinlock_init(&cq->lock))
395 ncqe = align_queue_size(cq_attr->cqe + 1);
396 if ((ncqe > (1 << 24)) || (ncqe < (cq_attr->cqe + 1))) {
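align_queue_size at line 395 rounds the requested depth (plus one slot) up to a power of two; the check at 396 then rejects both depths above the 2^24 CQE hardware limit and an overflowed result smaller than the request. A plausible implementation, stated as an assumption:

    /* Sketch: mlx5 queues are sized in powers of two. */
    static int align_queue_size(int req)
    {
            int size = 1;

            while (size < req)
                    size <<= 1;
            return size;
    }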
405 errno = -cqe_sz;
409 if (mlx5_alloc_cq_buf(to_mctx(context), cq, &cq->buf_a, ncqe, cqe_sz)) {
414 cq->dbrec = mlx5_alloc_dbrec(to_mctx(context));
415 if (!cq->dbrec) {
420 cq->dbrec[MLX5_CQ_SET_CI] = 0;
421 cq->dbrec[MLX5_CQ_ARM_DB] = 0;
422 cq->arm_sn = 0;
423 cq->cqe_sz = cqe_sz;
424 cq->flags = cq_alloc_flags;
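Lines 420-421 zero the two doorbell-record slots allocated at 414. For context, a sketch of how the consumer-index slot is typically updated after polling (the device reads these records big-endian; exact masking is an assumption):

    /* Publish the new consumer index so the HCA can reuse polled CQEs. */
    cq->dbrec[MLX5_CQ_SET_CI] = htobe32(cq->cons_index & 0xffffff);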
426 if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS &&
427 cq_attr->flags & IBV_CREATE_CQ_ATTR_SINGLE_THREADED)
428 cq->flags |= MLX5_CQ_FLAGS_SINGLE_THREADED;
429 cmd.buf_addr = (uintptr_t) cq->buf_a.buf;
430 cmd.db_addr = (uintptr_t) cq->dbrec;
431 cmd.cqe_size = cqe_sz;
434 if (mlx5cq_attr->comp_mask & ~(MLX5DV_CQ_INIT_ATTR_MASK_RESERVED - 1)) {
441 if (mlx5cq_attr->comp_mask & MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE) {
442 if (mctx->cqe_comp_caps.max_num &&
443 (mlx5cq_attr->cqe_comp_res_format &
444 mctx->cqe_comp_caps.supported_format)) {
445 cmd.cqe_comp_en = 1;
446 cmd.cqe_comp_res_format = mlx5cq_attr->cqe_comp_res_format;
455 ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel,
456 cq_attr->comp_vector,
457 ibv_cq_ex_to_cq(&cq->ibv_cq), &cmd.ibv_cmd,
458 sizeof(cmd), &resp.ibv_resp, sizeof(resp));
464 cq->active_buf = &cq->buf_a;
465 cq->resize_buf = NULL;
466 cq->cqn = resp.cqn;
467 cq->stall_enable = to_mctx(context)->stall_enable;
468 cq->stall_adaptive_enable = to_mctx(context)->stall_adaptive_enable;
469 cq->stall_cycles = to_mctx(context)->stall_cycles;
474 return &cq->ibv_cq;
477 mlx5_free_db(to_mctx(context), cq->dbrec);
480 mlx5_free_cq_buf(to_mctx(context), &cq->buf_a);
483 mlx5_spinlock_destroy(&cq->lock);
527 cq_attr->channel, cq_attr->cq_context);
534 context->ops.destroy_cq(ibv_cq_ex_to_cq(cq));
543 struct mlx5_resize_cq cmd;
544 struct mlx5_context *mctx = to_mctx(ibcq->context);
552 memset(&cmd, 0, sizeof(cmd));
558 mlx5_spin_lock(&cq->lock);
559 cq->active_cqes = cq->ibv_cq.cqe;
560 if (cq->active_buf == &cq->buf_a)
561 cq->resize_buf = &cq->buf_b;
563 cq->resize_buf = &cq->buf_a;
566 if (cqe == ibcq->cqe + 1) {
567 cq->resize_buf = NULL;
573 cq->resize_cqe_sz = cq->cqe_sz;
574 cq->resize_cqes = cqe;
575 err = mlx5_alloc_cq_buf(mctx, cq, cq->resize_buf, cq->resize_cqes, cq->resize_cqe_sz);
577 cq->resize_buf = NULL;
582 cmd.buf_addr = (uintptr_t)cq->resize_buf->buf;
583 cmd.cqe_size = cq->resize_cqe_sz;
585 err = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof(cmd),
591 mlx5_free_cq_buf(mctx, cq->active_buf);
592 cq->active_buf = cq->resize_buf;
593 cq->ibv_cq.cqe = cqe - 1;
594 mlx5_spin_unlock(&cq->lock);
595 cq->resize_buf = NULL;
599 mlx5_free_cq_buf(mctx, cq->resize_buf);
600 cq->resize_buf = NULL;
603 mlx5_spin_unlock(&cq->lock);
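A usage sketch for the resize path above; ibv_resize_cq returns 0 or an errno value, and on success the provider flips active_buf/resize_buf under cq->lock:

    int err = ibv_resize_cq(cq, 2048);      /* depth illustrative */
    if (err)
            fprintf(stderr, "resize_cq failed: %s\n", strerror(err));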
617 mlx5_free_db(to_mctx(cq->context), to_mcq(cq)->dbrec);
618 mlx5_free_cq_buf(to_mctx(cq->context), to_mcq(cq)->active_buf);
619 mlx5_spinlock_destroy(&mcq->lock);
628 struct mlx5_create_srq cmd;
636 ctx = to_mctx(pd->context);
639 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
642 ibsrq = &srq->vsrq.srq;
644 memset(&cmd, 0, sizeof cmd);
645 if (mlx5_spinlock_init(&srq->lock)) {
646 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
650 if (attr->attr.max_wr > ctx->max_srq_recv_wr) {
651 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", __func__, __LINE__,
652 attr->attr.max_wr, ctx->max_srq_recv_wr);
662 max_sge = ctx->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
663 if (attr->attr.max_sge > max_sge) {
664 fprintf(stderr, "%s-%d:max_sge %d, max supported sge %d\n", __func__, __LINE__,
665 attr->attr.max_sge, max_sge);
670 srq->max = align_queue_size(attr->attr.max_wr + 1);
671 srq->max_gs = attr->attr.max_sge;
672 srq->counter = 0;
674 if (mlx5_alloc_srq_buf(pd->context, srq)) {
675 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
679 srq->db = mlx5_alloc_dbrec(to_mctx(pd->context));
680 if (!srq->db) {
681 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
685 *srq->db = 0;
687 cmd.buf_addr = (uintptr_t) srq->buf.buf;
688 cmd.db_addr = (uintptr_t) srq->db;
689 srq->wq_sig = srq_sig_enabled();
690 if (srq->wq_sig)
691 cmd.flags = MLX5_SRQ_FLAG_SIGNATURE;
693 attr->attr.max_sge = srq->max_gs;
694 pthread_mutex_lock(&ctx->srq_table_mutex);
695 ret = ibv_cmd_create_srq(pd, ibsrq, attr, &cmd.ibv_cmd, sizeof(cmd),
704 pthread_mutex_unlock(&ctx->srq_table_mutex);
706 srq->srqn = resp.srqn;
707 srq->rsc.rsn = resp.srqn;
708 srq->rsc.type = MLX5_RSC_TYPE_SRQ;
716 pthread_mutex_unlock(&ctx->srq_table_mutex);
717 mlx5_free_db(to_mctx(pd->context), srq->db);
720 free(srq->wrid);
721 mlx5_free_buf(&srq->buf);
724 mlx5_spinlock_destroy(&srq->lock);
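A usage sketch for the SRQ creation path above (values illustrative):

    struct ibv_srq_init_attr sattr = {
            .attr = { .max_wr = 1024, .max_sge = 1 },
    };
    struct ibv_srq *srq = ibv_create_srq(pd, &sattr);

    if (!srq)
            perror("ibv_create_srq");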
736 struct ibv_modify_srq cmd;
738 return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
744 struct ibv_query_srq cmd;
746 return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
753 struct mlx5_context *ctx = to_mctx(srq->context);
759 if (ctx->cqe_version && msrq->rsc.type == MLX5_RSC_TYPE_XSRQ)
760 mlx5_clear_uidx(ctx, msrq->rsc.rsn);
762 mlx5_clear_srq(ctx, msrq->srqn);
764 mlx5_free_db(ctx, msrq->db);
765 mlx5_free_buf(&msrq->buf);
766 free(msrq->wrid);
767 mlx5_spinlock_destroy(&msrq->lock);
816 return -EINVAL;
831 size = sq_overhead(attr->qp_type);
835 if (attr->cap.max_inline_data) {
837 attr->cap.max_inline_data, 16);
840 if (attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) {
841 size += align(attr->max_tso_header, 16);
842 qp->max_tso_header = attr->max_tso_header;
845 max_gather = (ctx->max_sq_desc_sz - size) /
847 if (attr->cap.max_send_sge > max_gather)
848 return -EINVAL;
850 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
853 if (tot_size > ctx->max_sq_desc_sz)
854 return -EINVAL;
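The send-queue sizing above is per-QP-type overhead plus either inline data or gather entries, aligned to 64-byte basic blocks. A worked example under assumed sizes (mlx5_wqe_data_seg is 16 bytes: byte_count, lkey, addr; the 64-byte overhead is illustrative):

    size     = 64 + 4 * sizeof(struct mlx5_wqe_data_seg);   /* 64 + 64 = 128 */
    tot_size = align(size, MLX5_SEND_WQE_BB);               /* two 64B blocks */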
866 if (attr->srq)
869 num_scatter = max_t(uint32_t, attr->cap.max_recv_sge, 1);
871 if (qp->wq_sig)
874 if (size > ctx->max_rq_desc_sz)
875 return -EINVAL;
888 FILE *fp = ctx->dbg_fp;
890 if (!attr->cap.max_send_wr)
899 if (wqe_size > ctx->max_sq_desc_sz) {
901 return -EINVAL;
904 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
906 attr->cap.max_inline_data = qp->max_inline_data;
912 if (attr->cap.max_send_wr > 0x7fffffff / ctx->max_sq_desc_sz) {
914 return -EINVAL;
917 wq_size = mlx5_round_up_power_of_two(attr->cap.max_send_wr * wqe_size);
918 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
919 if (qp->sq.wqe_cnt > ctx->max_send_wqebb) {
921 return -EINVAL;
924 qp->sq.wqe_shift = mlx5_ilog2(MLX5_SEND_WQE_BB);
925 qp->sq.max_gs = attr->cap.max_send_sge;
926 qp->sq.max_post = wq_size / wqe_size;
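Plausible implementations of the two helpers used throughout the sizing code, stated as assumptions:

    static long long mlx5_round_up_power_of_two(long long sz)
    {
            long long ret = 1;

            while (ret < sz)
                    ret <<= 1;
            return ret;
    }

    static int mlx5_ilog2(int n)    /* callers pass powers of two */
    {
            int t = 0;

            while ((1 << t) < n)
                    ++t;
            return t;
    }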
940 if (!attr->max_wr)
941 return -EINVAL;
944 num_scatter = max_t(uint32_t, attr->max_sge, 1);
947 if (rwq->wq_sig)
950 if (wqe_size <= 0 || wqe_size > ctx->max_rq_desc_sz)
951 return -EINVAL;
954 wq_size = mlx5_round_up_power_of_two(attr->max_wr) * wqe_size;
956 rwq->rq.wqe_cnt = wq_size / wqe_size;
957 rwq->rq.wqe_shift = mlx5_ilog2(wqe_size);
958 rwq->rq.max_post = 1 << mlx5_ilog2(wq_size / wqe_size);
959 scat_spc = wqe_size -
960 ((rwq->wq_sig) ? sizeof(struct mlx5_rwqe_sig) : 0);
961 rwq->rq.max_gs = scat_spc / sizeof(struct mlx5_wqe_data_seg);
972 FILE *fp = ctx->dbg_fp;
974 if (!attr->cap.max_recv_wr)
977 if (attr->cap.max_recv_wr > ctx->max_recv_wr) {
979 return -EINVAL;
983 if (wqe_size < 0 || wqe_size > ctx->max_rq_desc_sz) {
985 return -EINVAL;
988 wq_size = mlx5_round_up_power_of_two(attr->cap.max_recv_wr) * wqe_size;
991 qp->rq.wqe_cnt = wq_size / wqe_size;
992 qp->rq.wqe_shift = mlx5_ilog2(wqe_size);
993 qp->rq.max_post = 1 << mlx5_ilog2(wq_size / wqe_size);
994 scat_spc = wqe_size -
995 (qp->wq_sig ? sizeof(struct mlx5_rwqe_sig) : 0);
996 qp->rq.max_gs = scat_spc / sizeof(struct mlx5_wqe_data_seg);
998 qp->rq.wqe_cnt = 0;
999 qp->rq.wqe_shift = 0;
1000 qp->rq.max_post = 0;
1001 qp->rq.max_gs = 0;
1024 qp->sq.offset = ret;
1025 qp->rq.offset = 0;
1035 qp->bf = &ctx->bfs[uuar_index];
1059 if (qp->sq.wqe_cnt) {
1060 qp->sq.wrid = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid));
1061 if (!qp->sq.wrid) {
1063 err = -1;
1067 qp->sq.wr_data = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data));
1068 if (!qp->sq.wr_data) {
1070 err = -1;
1075 qp->sq.wqe_head = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head));
1076 if (!qp->sq.wqe_head) {
1078 err = -1;
1082 if (qp->rq.wqe_cnt) {
1083 qp->rq.wrid = malloc(qp->rq.wqe_cnt * sizeof(uint64_t));
1084 if (!qp->rq.wrid) {
1086 err = -1;
1092 qp_huge_key = qptype2key(qp->ibv_qp->qp_type);
1099 err = mlx5_alloc_prefered_buf(to_mctx(context), &qp->buf,
1100 align(qp->buf_size, to_mdev
1101 (context->device)->page_size),
1102 to_mdev(context->device)->page_size,
1107 err = -ENOMEM;
1111 memset(qp->buf.buf, 0, qp->buf_size);
1113 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1114 size_t aligned_sq_buf_size = align(qp->sq_buf_size,
1115 to_mdev(context->device)->page_size);
1117 err = mlx5_alloc_prefered_buf(to_mctx(context), &qp->sq_buf,
1119 to_mdev(context->device)->page_size,
1123 err = -ENOMEM;
1127 memset(qp->sq_buf.buf, 0, aligned_sq_buf_size);
1132 mlx5_free_actual_buf(to_mctx(qp->verbs_qp.qp.context), &qp->buf);
1134 if (qp->rq.wrid)
1135 free(qp->rq.wrid);
1137 if (qp->sq.wqe_head)
1138 free(qp->sq.wqe_head);
1140 if (qp->sq.wr_data)
1141 free(qp->sq.wr_data);
1142 if (qp->sq.wrid)
1143 free(qp->sq.wrid);
1150 struct mlx5_context *ctx = to_mctx(qp->ibv_qp->context);
1152 mlx5_free_actual_buf(ctx, &qp->buf);
1154 if (qp->sq_buf.buf)
1155 mlx5_free_actual_buf(ctx, &qp->sq_buf);
1157 if (qp->rq.wrid)
1158 free(qp->rq.wrid);
1160 if (qp->sq.wqe_head)
1161 free(qp->sq.wqe_head);
1163 if (qp->sq.wrid)
1164 free(qp->sq.wrid);
1166 if (qp->sq.wr_data)
1167 free(qp->sq.wr_data);
1178 if (attr->rx_hash_conf.rx_hash_key_len > sizeof(cmd_ex_rss.rx_hash_key)) {
1183 cmd_ex_rss.rx_hash_fields_mask = attr->rx_hash_conf.rx_hash_fields_mask;
1184 cmd_ex_rss.rx_hash_function = attr->rx_hash_conf.rx_hash_function;
1185 cmd_ex_rss.rx_key_len = attr->rx_hash_conf.rx_hash_key_len;
1186 memcpy(cmd_ex_rss.rx_hash_key, attr->rx_hash_conf.rx_hash_key,
1187 attr->rx_hash_conf.rx_hash_key_len);
1189 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp,
1190 sizeof(qp->verbs_qp), attr,
1197 qp->rss_qp = 1;
1203 struct mlx5_create_qp *cmd,
1211 memcpy(&cmd_ex.ibv_cmd.base, &cmd->ibv_cmd.user_handle,
1212 offsetof(typeof(cmd->ibv_cmd), is_srq) +
1213 sizeof(cmd->ibv_cmd.is_srq) -
1214 offsetof(typeof(cmd->ibv_cmd), user_handle));
1216 memcpy(&cmd_ex.drv_ex, &cmd->buf_addr,
1217 offsetof(typeof(*cmd), sq_buf_addr) +
1218 sizeof(cmd->sq_buf_addr) - sizeof(cmd->ibv_cmd));
1220 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp,
1221 sizeof(qp->verbs_qp), attr,
1223 sizeof(cmd_ex), &resp->ibv_resp,
1224 sizeof(resp->ibv_resp), sizeof(*resp));
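The two memcpy calls above copy a contiguous member range of the legacy command into the extended command, with the byte count bounded by offsetof. The same trick in a generic, hedged form (GNU typeof, as already used at line 1212):

    #include <stddef.h>
    #include <string.h>

    /* Copy the byte range covering members first..last of *src into dst. */
    #define COPY_MEMBER_RANGE(dst, src, first, last)                          \
            memcpy((dst), &(src)->first,                                      \
                   offsetof(typeof(*(src)), last) + sizeof((src)->last) -     \
                   offsetof(typeof(*(src)), first))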
1248 struct mlx5_create_qp cmd;
1257 FILE *fp = ctx->dbg_fp;
1259 if (attr->comp_mask & ~MLX5_CREATE_QP_SUP_COMP_MASK)
1262 if ((attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) &&
1263 (attr->qp_type != IBV_QPT_RAW_PACKET))
1271 ibqp = (struct ibv_qp *)&qp->verbs_qp;
1272 qp->ibv_qp = ibqp;
1274 memset(&cmd, 0, sizeof(cmd));
1278 if (attr->comp_mask & IBV_QP_INIT_ATTR_RX_HASH) {
1286 qp->wq_sig = qp_sig_enabled();
1287 if (qp->wq_sig)
1288 cmd.flags |= MLX5_QP_FLAG_SIGNATURE;
1291 cmd.flags |= MLX5_QP_FLAG_SCATTER_CQE;
1295 errno = -ret;
1299 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1300 qp->buf_size = qp->sq.offset;
1301 qp->sq_buf_size = ret - qp->buf_size;
1302 qp->sq.offset = 0;
1304 qp->buf_size = ret;
1305 qp->sq_buf_size = 0;
1313 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1314 qp->sq_start = qp->sq_buf.buf;
1315 qp->sq.qend = qp->sq_buf.buf +
1316 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
1318 qp->sq_start = qp->buf.buf + qp->sq.offset;
1319 qp->sq.qend = qp->buf.buf + qp->sq.offset +
1320 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
1325 if (mlx5_spinlock_init(&qp->sq.lock))
1328 if (mlx5_spinlock_init(&qp->rq.lock))
1331 qp->db = mlx5_alloc_dbrec(ctx);
1332 if (!qp->db) {
1337 qp->db[MLX5_RCV_DBR] = 0;
1338 qp->db[MLX5_SND_DBR] = 0;
1340 cmd.buf_addr = (uintptr_t) qp->buf.buf;
1341 cmd.sq_buf_addr = (attr->qp_type == IBV_QPT_RAW_PACKET) ?
1342 (uintptr_t) qp->sq_buf.buf : 0;
1343 cmd.db_addr = (uintptr_t) qp->db;
1344 cmd.sq_wqe_count = qp->sq.wqe_cnt;
1345 cmd.rq_wqe_count = qp->rq.wqe_cnt;
1346 cmd.rq_wqe_shift = qp->rq.wqe_shift;
1348 if (ctx->atomic_cap == IBV_ATOMIC_HCA)
1349 qp->atomics_enabled = 1;
1351 if (!ctx->cqe_version) {
1352 cmd.uidx = 0xffffff;
1353 pthread_mutex_lock(&ctx->qp_table_mutex);
1354 } else if (!is_xrc_tgt(attr->qp_type)) {
1361 cmd.uidx = usr_idx;
1364 if (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK)
1365 ret = mlx5_cmd_create_qp_ex(context, attr, &cmd, qp, &resp_ex);
1367 ret = ibv_cmd_create_qp_ex(context, &qp->verbs_qp, sizeof(qp->verbs_qp),
1368 attr, &cmd.ibv_cmd, sizeof(cmd),
1375 uuar_index = (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK) ?
1377 if (!ctx->cqe_version) {
1378 if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) {
1379 ret = mlx5_store_qp(ctx, ibqp->qp_num, qp);
1386 pthread_mutex_unlock(&ctx->qp_table_mutex);
1391 qp->rq.max_post = qp->rq.wqe_cnt;
1392 if (attr->sq_sig_all)
1393 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
1395 qp->sq_signal_bits = 0;
1397 attr->cap.max_send_wr = qp->sq.max_post;
1398 attr->cap.max_recv_wr = qp->rq.max_post;
1399 attr->cap.max_recv_sge = qp->rq.max_gs;
1401 qp->rsc.type = MLX5_RSC_TYPE_QP;
1402 qp->rsc.rsn = (ctx->cqe_version && !is_xrc_tgt(attr->qp_type)) ?
1403 usr_idx : ibqp->qp_num;
1411 if (!ctx->cqe_version)
1412 pthread_mutex_unlock(&to_mctx(context)->qp_table_mutex);
1413 else if (!is_xrc_tgt(attr->qp_type))
1417 mlx5_free_db(to_mctx(context), qp->db);
1420 mlx5_spinlock_destroy(&qp->rq.lock);
1423 mlx5_spinlock_destroy(&qp->sq.lock);
1444 qp = create_qp(pd->context, &attrx);
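The line above is the tail of the usual legacy-to-extended wrapper. A sketch of the whole pattern, assuming attrx is built from the legacy attributes (the extended struct leads with the legacy layout):

    struct ibv_qp *mlx5_create_qp_sketch(struct ibv_pd *pd,
                                         struct ibv_qp_init_attr *attr)
    {
            struct ibv_qp_init_attr_ex attrx = {};
            struct ibv_qp *qp;

            memcpy(&attrx, attr, sizeof(*attr));    /* legacy fields lead */
            attrx.comp_mask = IBV_QP_INIT_ATTR_PD;
            attrx.pd = pd;

            qp = create_qp(pd->context, &attrx);
            if (qp)
                    memcpy(attr, &attrx, sizeof(*attr));    /* report caps back */
            return qp;
    }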
1453 struct mlx5_cq *send_cq = to_mcq(qp->send_cq);
1454 struct mlx5_cq *recv_cq = to_mcq(qp->recv_cq);
1458 mlx5_spin_lock(&send_cq->lock);
1459 } else if (send_cq->cqn < recv_cq->cqn) {
1460 mlx5_spin_lock(&send_cq->lock);
1461 mlx5_spin_lock(&recv_cq->lock);
1463 mlx5_spin_lock(&recv_cq->lock);
1464 mlx5_spin_lock(&send_cq->lock);
1467 mlx5_spin_lock(&send_cq->lock);
1469 mlx5_spin_lock(&recv_cq->lock);
1475 struct mlx5_cq *send_cq = to_mcq(qp->send_cq);
1476 struct mlx5_cq *recv_cq = to_mcq(qp->recv_cq);
1480 mlx5_spin_unlock(&send_cq->lock);
1481 } else if (send_cq->cqn < recv_cq->cqn) {
1482 mlx5_spin_unlock(&recv_cq->lock);
1483 mlx5_spin_unlock(&send_cq->lock);
1485 mlx5_spin_unlock(&send_cq->lock);
1486 mlx5_spin_unlock(&recv_cq->lock);
1489 mlx5_spin_unlock(&send_cq->lock);
1491 mlx5_spin_unlock(&recv_cq->lock);
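mlx5_lock_cqs/mlx5_unlock_cqs above encode a total order: when the send and receive CQs differ, the lower CQN is always taken first and released last, so two threads locking the same pair can never deadlock. Condensed sketch (the NULL-CQ branches of the original are omitted):

    static void lock_cq_pair(struct mlx5_cq *send_cq, struct mlx5_cq *recv_cq)
    {
            if (send_cq == recv_cq) {
                    mlx5_spin_lock(&send_cq->lock);
            } else if (send_cq->cqn < recv_cq->cqn) {
                    mlx5_spin_lock(&send_cq->lock);
                    mlx5_spin_lock(&recv_cq->lock);
            } else {
                    mlx5_spin_lock(&recv_cq->lock);
                    mlx5_spin_lock(&send_cq->lock);
            }
    }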
1498 struct mlx5_context *ctx = to_mctx(ibqp->context);
1501 if (qp->rss_qp) {
1508 if (!ctx->cqe_version)
1509 pthread_mutex_lock(&ctx->qp_table_mutex);
1513 if (!ctx->cqe_version)
1514 pthread_mutex_unlock(&ctx->qp_table_mutex);
1520 __mlx5_cq_clean(to_mcq(ibqp->recv_cq), qp->rsc.rsn,
1521 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1522 if (ibqp->send_cq != ibqp->recv_cq)
1523 __mlx5_cq_clean(to_mcq(ibqp->send_cq), qp->rsc.rsn, NULL);
1525 if (!ctx->cqe_version) {
1526 if (qp->sq.wqe_cnt || qp->rq.wqe_cnt)
1527 mlx5_clear_qp(ctx, ibqp->qp_num);
1531 if (!ctx->cqe_version)
1532 pthread_mutex_unlock(&ctx->qp_table_mutex);
1533 else if (!is_xrc_tgt(ibqp->qp_type))
1534 mlx5_clear_uidx(ctx, qp->rsc.rsn);
1536 mlx5_free_db(ctx, qp->db);
1537 mlx5_spinlock_destroy(&qp->rq.lock);
1538 mlx5_spinlock_destroy(&qp->sq.lock);
1549 struct ibv_query_qp cmd;
1553 if (qp->rss_qp)
1556 ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof(cmd));
1560 init_attr->cap.max_send_wr = qp->sq.max_post;
1561 init_attr->cap.max_send_sge = qp->sq.max_gs;
1562 init_attr->cap.max_inline_data = qp->max_inline_data;
1564 attr->cap = init_attr->cap;
1576 struct ibv_modify_qp cmd = {};
1580 struct mlx5_context *context = to_mctx(qp->context);
1582 uint32_t *db;
1584 if (mqp->rss_qp)
1588 switch (qp->qp_type) {
1590 if (context->cached_link_layer[attr->port_num - 1] ==
1592 if (context->cached_device_cap_flags &
1594 mqp->qp_cap_cache |=
1599 context->cached_tso_caps.supported_qpts,
1601 mqp->max_tso =
1602 context->cached_tso_caps.max_tso;
1618 &cmd, sizeof(cmd));
1622 attr->qp_state == IBV_QPS_RESET) {
1623 if (qp->recv_cq) {
1624 mlx5_cq_clean(to_mcq(qp->recv_cq), mqp->rsc.rsn,
1625 qp->srq ? to_msrq(qp->srq) : NULL);
1627 if (qp->send_cq != qp->recv_cq && qp->send_cq)
1628 mlx5_cq_clean(to_mcq(qp->send_cq),
1629 to_mqp(qp)->rsc.rsn, NULL);
1632 db = mqp->db;
1633 db[MLX5_RCV_DBR] = 0;
1634 db[MLX5_SND_DBR] = 0;
1647 attr->qp_state == IBV_QPS_RTR &&
1648 qp->qp_type == IBV_QPT_RAW_PACKET) {
1649 mlx5_spin_lock(&mqp->rq.lock);
1650 mqp->db[MLX5_RCV_DBR] = htobe32(mqp->rq.head & 0xffff);
1651 mlx5_spin_unlock(&mqp->rq.lock);
1661 struct mlx5_context *ctx = to_mctx(pd->context);
1669 if (attr->port_num < 1 || attr->port_num > ctx->num_ports)
1672 if (ctx->cached_link_layer[attr->port_num - 1]) {
1673 is_eth = ctx->cached_link_layer[attr->port_num - 1] ==
1676 if (ibv_query_port(pd->context, attr->port_num, &port_attr))
1682 if (unlikely((!attr->is_global) && is_eth)) {
1692 if (ibv_query_gid_type(pd->context, attr->port_num,
1693 attr->grh.sgid_index, &gid_type))
1697 ah->av.rlid = htobe16(rand() % (RROCE_UDP_SPORT_MAX + 1
1698 - RROCE_UDP_SPORT_MIN)
1705 ah->av.fl_mlid = attr->src_path_bits & 0x7f;
1706 ah->av.rlid = htobe16(attr->dlid);
1709 ah->av.stat_rate_sl = (attr->static_rate << 4) | attr->sl;
1710 if (attr->is_global) {
1711 ah->av.tclass = attr->grh.traffic_class;
1712 ah->av.hop_limit = attr->grh.hop_limit;
1714 ((attr->grh.sgid_index & 0xff) << 20) |
1715 (attr->grh.flow_label & 0xfffff));
1716 ah->av.grh_gid_fl = tmp;
1717 memcpy(ah->av.rgid, attr->grh.dgid.raw, 16);
1721 if (ctx->cmds_supp_uhw & MLX5_USER_CMDS_SUPP_UHW_CREATE_AH) {
1724 if (ibv_cmd_create_ah(pd, &ah->ibv_ah, attr, &resp.ibv_resp, sizeof(resp)))
1727 ah->kern_ah = true;
1728 memcpy(ah->av.rmac, resp.dmac, ETHERNET_LL_SIZE);
1732 if (ibv_resolve_eth_l2_from_gid(pd->context, attr,
1733 ah->av.rmac, &vid))
1738 return &ah->ibv_ah;
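Reading the shifts at lines 1714-1715, the packed grh_gid_fl word carries the SGID index in bits 27:20 and the flow label in bits 19:0; a GRH-valid flag sits in the high bits on a line this match list elides. Decoding sketch (byte-order handling is an assumption):

    uint32_t w    = be32toh(ah->av.grh_gid_fl);
    uint32_t sgid = (w >> 20) & 0xff;       /* SGID table index */
    uint32_t fl   = w & 0xfffff;            /* IPv6-style flow label */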
1749 if (mah->kern_ah) {
1779 *srq_num = msrq->srqn;
1790 struct ibv_open_xrcd cmd = {};
1798 &cmd, sizeof(cmd), &resp, sizeof(resp));
1804 return &xrcd->xrcd;
1824 struct mlx5_create_srq_ex cmd;
1831 FILE *fp = ctx->dbg_fp;
1837 ibsrq = (struct ibv_srq *)&msrq->vsrq;
1839 memset(&cmd, 0, sizeof(cmd));
1842 if (mlx5_spinlock_init(&msrq->lock)) {
1843 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
1847 if (attr->attr.max_wr > ctx->max_srq_recv_wr) {
1848 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n",
1849 __func__, __LINE__, attr->attr.max_wr,
1850 ctx->max_srq_recv_wr);
1860 max_sge = ctx->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
1861 if (attr->attr.max_sge > max_sge) {
1862 fprintf(stderr, "%s-%d:max_sge %d, max supported sge %d\n",
1863 __func__, __LINE__, attr->attr.max_sge,
1864 max_sge);
1869 msrq->max = align_queue_size(attr->attr.max_wr + 1);
1870 msrq->max_gs = attr->attr.max_sge;
1871 msrq->counter = 0;
1874 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
1878 msrq->db = mlx5_alloc_dbrec(ctx);
1879 if (!msrq->db) {
1880 fprintf(stderr, "%s-%d:\n", __func__, __LINE__);
1884 *msrq->db = 0;
1886 cmd.buf_addr = (uintptr_t)msrq->buf.buf;
1887 cmd.db_addr = (uintptr_t)msrq->db;
1888 msrq->wq_sig = srq_sig_enabled();
1889 if (msrq->wq_sig)
1890 cmd.flags = MLX5_SRQ_FLAG_SIGNATURE;
1892 attr->attr.max_sge = msrq->max_gs;
1893 if (ctx->cqe_version) {
1899 cmd.uidx = uidx;
1901 cmd.uidx = 0xffffff;
1902 pthread_mutex_lock(&ctx->srq_table_mutex);
1905 err = ibv_cmd_create_srq_ex(context, &msrq->vsrq, sizeof(msrq->vsrq),
1906 attr, &cmd.ibv_cmd, sizeof(cmd),
1911 if (!ctx->cqe_version) {
1916 pthread_mutex_unlock(&ctx->srq_table_mutex);
1919 msrq->srqn = resp.srqn;
1920 msrq->rsc.type = MLX5_RSC_TYPE_XSRQ;
1921 msrq->rsc.rsn = ctx->cqe_version ? cmd.uidx : resp.srqn;
1929 if (ctx->cqe_version)
1930 mlx5_clear_uidx(ctx, cmd.uidx);
1932 pthread_mutex_unlock(&ctx->srq_table_mutex);
1935 mlx5_free_db(ctx, msrq->db);
1938 free(msrq->wrid);
1939 mlx5_free_buf(&msrq->buf);
1942 mlx5_spinlock_destroy(&msrq->lock);
1953 if (!(attr->comp_mask & IBV_SRQ_INIT_ATTR_TYPE) ||
1954 (attr->srq_type == IBV_SRQT_BASIC))
1955 return mlx5_create_srq(attr->pd,
1957 else if (attr->srq_type == IBV_SRQT_XRC)
1970 struct mlx5_query_device_ex cmd;
1977 int cmd_supp_uhw = mctx->cmds_supp_uhw &
1980 memset(&cmd, 0, sizeof(cmd));
1984 &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd),
1990 attr->tso_caps = resp.tso_caps;
1991 attr->rss_caps.rx_hash_fields_mask = resp.rss_caps.rx_hash_fields_mask;
1992 attr->rss_caps.rx_hash_function = resp.rss_caps.rx_hash_function;
1993 attr->packet_pacing_caps = resp.packet_pacing_caps.caps;
1996 mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_MPW;
1998 mctx->cqe_comp_caps = resp.cqe_comp_caps;
2003 a = &attr->orig_attr;
2004 snprintf(a->fw_ver, sizeof(a->fw_ver), "%d.%d.%04d",
2025 mlx5_free_actual_buf(ctx, &rwq->buf);
2026 free(rwq->rq.wrid);
2036 rwq->rq.wrid = malloc(rwq->rq.wqe_cnt * sizeof(uint64_t));
2037 if (!rwq->rq.wrid) {
2039 return -1;
2042 err = mlx5_alloc_prefered_buf(to_mctx(context), &rwq->buf,
2043 align(rwq->buf_size, to_mdev
2044 (context->device)->page_size),
2045 to_mdev(context->device)->page_size,
2050 free(rwq->rq.wrid);
2052 return -1;
2061 struct mlx5_create_wq cmd;
2068 FILE *fp = ctx->dbg_fp;
2070 if (attr->wq_type != IBV_WQT_RQ)
2073 memset(&cmd, 0, sizeof(cmd));
2080 ret = ibv_init_wq(&rwq->wq);
2084 rwq->wq_sig = rwq_sig_enabled(context);
2085 if (rwq->wq_sig)
2086 cmd.drv.flags = MLX5_RWQ_FLAG_SIGNATURE;
2090 errno = -ret;
2094 rwq->buf_size = ret;
2100 if (mlx5_spinlock_init(&rwq->rq.lock))
2103 rwq->db = mlx5_alloc_dbrec(ctx);
2104 if (!rwq->db)
2107 rwq->db[MLX5_RCV_DBR] = 0;
2108 rwq->db[MLX5_SND_DBR] = 0;
2109 rwq->pbuff = rwq->buf.buf + rwq->rq.offset;
2110 rwq->recv_db = &rwq->db[MLX5_RCV_DBR];
2111 cmd.drv.buf_addr = (uintptr_t)rwq->buf.buf;
2112 cmd.drv.db_addr = (uintptr_t)rwq->db;
2113 cmd.drv.rq_wqe_count = rwq->rq.wqe_cnt;
2114 cmd.drv.rq_wqe_shift = rwq->rq.wqe_shift;
2121 cmd.drv.user_index = usr_idx;
2122 err = ibv_cmd_create_wq(context, attr, &rwq->wq, &cmd.ibv_cmd,
2123 sizeof(cmd.ibv_cmd),
2124 sizeof(cmd),
2130 rwq->rsc.type = MLX5_RSC_TYPE_RWQ;
2131 rwq->rsc.rsn = cmd.drv.user_index;
2133 rwq->wq.post_recv = mlx5_post_wq_recv;
2134 return &rwq->wq;
2137 mlx5_clear_uidx(ctx, cmd.drv.user_index);
2139 mlx5_free_db(to_mctx(context), rwq->db);
2141 mlx5_spinlock_destroy(&rwq->rq.lock);
2145 ibv_cleanup_wq(&rwq->wq);
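A usage sketch for the receive work queue path above (values illustrative):

    struct ibv_wq_init_attr wq_attr = {
            .wq_type = IBV_WQT_RQ,
            .max_wr  = 256,
            .max_sge = 1,
            .pd      = pd,
            .cq      = cq,
    };
    struct ibv_wq *wq = ibv_create_wq(ctx, &wq_attr);

    if (!wq)
            perror("ibv_create_wq");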
2153 struct mlx5_modify_wq cmd = {};
2156 if ((attr->attr_mask & IBV_WQ_ATTR_STATE) &&
2157 attr->wq_state == IBV_WQS_RDY) {
2158 if ((attr->attr_mask & IBV_WQ_ATTR_CURR_STATE) &&
2159 attr->curr_wq_state != wq->state)
2160 return -EINVAL;
2162 if (wq->state == IBV_WQS_RESET) {
2163 mlx5_spin_lock(&to_mcq(wq->cq)->lock);
2164 __mlx5_cq_clean(to_mcq(wq->cq),
2165 rwq->rsc.rsn, NULL);
2166 mlx5_spin_unlock(&to_mcq(wq->cq)->lock);
2168 rwq->db[MLX5_RCV_DBR] = 0;
2169 rwq->db[MLX5_SND_DBR] = 0;
2173 return ibv_cmd_modify_wq(wq, attr, &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd));
2185 mlx5_spin_lock(&to_mcq(wq->cq)->lock);
2186 __mlx5_cq_clean(to_mcq(wq->cq), rwq->rsc.rsn, NULL);
2187 mlx5_spin_unlock(&to_mcq(wq->cq)->lock);
2188 mlx5_clear_uidx(to_mctx(wq->context), rwq->rsc.rsn);
2189 mlx5_free_db(to_mctx(wq->context), rwq->db);
2190 mlx5_spinlock_destroy(&rwq->rq.lock);
2191 mlx5_free_rwq_buf(rwq, wq->context);
2192 ibv_cleanup_wq(&rwq->wq);
2201 struct ibv_create_rwq_ind_table *cmd;
2209 num_tbl_entries = 1 << init_attr->log_ind_tbl_size;
2214 cmd_size = required_tbl_size + sizeof(*cmd);
2215 cmd = calloc(1, cmd_size);
2216 if (!cmd)
2224 err = ibv_cmd_create_rwq_ind_table(context, init_attr, ind_table, cmd,
2230 free(cmd);
2236 free(cmd);
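A usage sketch for the indirection-table path above: the table covers 1 << log_ind_tbl_size work queues, here 4 (values illustrative):

    struct ibv_wq *wqs[4];  /* previously created RQs */
    struct ibv_rwq_ind_table_init_attr ind_attr = {
            .log_ind_tbl_size = 2,
            .ind_tbl          = wqs,
            .comp_mask        = 0,
    };
    struct ibv_rwq_ind_table *ind_tbl =
            ibv_create_rwq_ind_table(ctx, &ind_attr);

    if (!ind_tbl)
            perror("ibv_create_rwq_ind_table");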