Lines matching defs:qp in drivers/infiniband/hw/mlx4/qp.c (each entry below is prefixed with its line number in that source file)
47 #include <linux/mlx4/qp.h>
107 struct mlx4_qp *qp;
113 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
118 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
119 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
123 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
130 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
131 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
137 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
138 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
147 return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP);
151 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
158 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
159 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
165 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
174 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
176 return mlx4_buf_offset(&qp->buf, offset);
179 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
181 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
184 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
186 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
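The get_recv_wqe()/get_send_wqe() lines above resolve a work queue entry to an address as the ring's base offset plus (index << stride-log2). A minimal standalone sketch of that arithmetic follows; wq_layout and wqe_addr are names local to the sketch, and the offsets and shifts are made-up example values, not driver data.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-ring fields used by get_*_wqe(). */
struct wq_layout {
        uint32_t offset;        /* byte offset of this ring in the QP buffer */
        uint32_t wqe_shift;     /* log2 of the WQE stride in bytes */
};

static void *wqe_addr(void *qp_buf, const struct wq_layout *wq, int n)
{
        /* Same shape as get_recv_wqe()/get_send_wqe(): base + (n << shift). */
        return (char *)qp_buf + wq->offset + ((size_t)n << wq->wqe_shift);
}

int main(void)
{
        char buf[4096];
        struct wq_layout rq = { .offset = 0,    .wqe_shift = 5 };  /* 32 B WQEs */
        struct wq_layout sq = { .offset = 1024, .wqe_shift = 6 };  /* 64 B WQEs */

        /* RQ entry 3 lands at byte 96, SQ entry 3 at byte 1024 + 192 = 1216. */
        printf("rq[3] at %td, sq[3] at %td\n",
               (char *)wqe_addr(buf, &rq, 3) - buf,
               (char *)wqe_addr(buf, &sq, 3) - buf);
        return 0;
}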
194 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
202 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
215 struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
219 event.element.qp = ibqp;
248 qpe_work->type, qpe_work->qp->qpn);
255 mlx4_put_qp(qpe_work->qp);
259 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
261 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
265 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
274 qpe_work->qp = qp;
281 mlx4_put_qp(qp);
284 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
287 type, qp->qpn);
337 bool is_user, bool has_rq, struct mlx4_ib_qp *qp,
349 qp->rq.wqe_cnt = qp->rq.max_gs = 0;
360 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr));
361 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));
362 wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg);
363 qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz));
368 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
369 cap->max_recv_sge = qp->rq.max_gs;
371 cap->max_recv_wr = qp->rq.max_post =
372 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
373 cap->max_recv_sge = min(qp->rq.max_gs,
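The receive queue sizing lines above round both the requested depth and SGE count up to powers of two, then take the WQE stride from the larger of the scatter size and the inline-receive size. Below is a userspace sketch of the same arithmetic; the helpers are local re-implementations of the kernel's roundup_pow_of_two()/ilog2(), and the 16-byte data-segment size mirrors sizeof(struct mlx4_wqe_data_seg).

#include <stdint.h>
#include <stdio.h>

/* Local re-implementations of the kernel helpers used in the listed lines. */
static uint32_t roundup_pow_of_two_u32(uint32_t v)
{
        uint32_t p = 1;

        while (p < v)
                p <<= 1;
        return p;
}

static uint32_t ilog2_u32(uint32_t v)
{
        uint32_t l = 0;

        while (v >>= 1)
                l++;
        return l;
}

int main(void)
{
        uint32_t max_recv_wr = 100, max_recv_sge = 3, inl_recv_sz = 0;
        const uint32_t data_seg_sz = 16;  /* sizeof(struct mlx4_wqe_data_seg) */

        uint32_t wqe_cnt = roundup_pow_of_two_u32(max_recv_wr ? max_recv_wr : 1);
        uint32_t max_gs = roundup_pow_of_two_u32(max_recv_sge ? max_recv_sge : 1);
        uint32_t wqe_size = max_gs * data_seg_sz;
        uint32_t stride = wqe_size > inl_recv_sz ? wqe_size : inl_recv_sz;
        uint32_t wqe_shift = ilog2_u32(stride);

        /* 100 WRs -> 128 entries; 3 SGEs -> 4; 4 * 16 B = 64 B stride -> shift 6 */
        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u\n", wqe_cnt, max_gs, wqe_shift);
        return 0;
}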
382 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
389 cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
404 send_wqe_overhead(type, qp->flags);
409 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
415 qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
416 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
417 qp->sq_spare_wqes);
419 qp->sq.max_gs =
421 (1 << qp->sq.wqe_shift)) -
422 send_wqe_overhead(type, qp->flags)) /
425 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
426 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
427 if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
428 qp->rq.offset = 0;
429 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
431 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
432 qp->sq.offset = 0;
435 cap->max_send_wr = qp->sq.max_post =
436 qp->sq.wqe_cnt - qp->sq_spare_wqes;
437 cap->max_send_sge = min(qp->sq.max_gs,
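The kernel SQ sizing lines above then compute the total QP buffer as the sum of the two rings and place whichever ring has the larger WQE stride at offset 0 (the send queue on a tie). A small sketch of just that layout decision; struct ring and layout_qp_buf() are names local to the sketch.

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint32_t wqe_cnt;               /* power of two */
        uint32_t wqe_shift;             /* log2 of the WQE stride */
        uint32_t offset;                /* byte offset inside the QP buffer */
};

/* Mirrors the listed buf_size/offset logic: the ring with the larger stride
 * starts at offset 0 and the other ring follows it. */
static uint32_t layout_qp_buf(struct ring *rq, struct ring *sq)
{
        uint32_t buf_size = (rq->wqe_cnt << rq->wqe_shift) +
                            (sq->wqe_cnt << sq->wqe_shift);

        if (rq->wqe_shift > sq->wqe_shift) {
                rq->offset = 0;
                sq->offset = rq->wqe_cnt << rq->wqe_shift;
        } else {
                rq->offset = sq->wqe_cnt << sq->wqe_shift;
                sq->offset = 0;
        }
        return buf_size;
}

int main(void)
{
        struct ring rq = { .wqe_cnt = 128, .wqe_shift = 5 };    /* 32 B WQEs */
        struct ring sq = { .wqe_cnt = 256, .wqe_shift = 6 };    /* 64 B WQEs */
        uint32_t sz = layout_qp_buf(&rq, &sq);

        /* 4096 + 16384 = 20480 bytes; the larger-stride SQ sits at offset 0. */
        printf("buf_size=%u rq.offset=%u sq.offset=%u\n", sz, rq.offset, sq.offset);
        return 0;
}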
447 struct mlx4_ib_qp *qp,
461 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
462 qp->sq.wqe_shift = ucmd->log_sq_stride;
464 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
465 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
470 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
474 qp->sqp_proxy_rcv =
475 kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf),
477 if (!qp->sqp_proxy_rcv)
479 for (i = 0; i < qp->rq.wqe_cnt; i++) {
480 qp->sqp_proxy_rcv[i].addr =
483 if (!qp->sqp_proxy_rcv[i].addr)
485 qp->sqp_proxy_rcv[i].map =
486 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
489 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
490 kfree(qp->sqp_proxy_rcv[i].addr);
499 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
502 kfree(qp->sqp_proxy_rcv[i].addr);
504 kfree(qp->sqp_proxy_rcv);
505 qp->sqp_proxy_rcv = NULL;
509 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
513 for (i = 0; i < qp->rq.wqe_cnt; i++) {
514 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map,
517 kfree(qp->sqp_proxy_rcv[i].addr);
519 kfree(qp->sqp_proxy_rcv);
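alloc_proxy_bufs()/free_proxy_bufs() above follow the usual allocate-map-unwind pattern: an array of receive buffers is allocated, each entry is allocated and DMA-mapped, and a failure part-way through unmaps and frees everything already set up. A hedged userspace sketch of that shape; fake_map()/fake_unmap() merely stand in for ib_dma_map_single()/ib_dma_unmap_single(), and all other names are local to the sketch.

#include <stdlib.h>
#include <stdio.h>

struct rcv_buf {
        void *addr;             /* per-entry buffer, like qp->sqp_proxy_rcv[i].addr */
        unsigned long map;      /* stand-in for the DMA mapping handle */
};

/* Stand-ins for ib_dma_map_single()/ib_dma_unmap_single(); 0 means failure. */
static unsigned long fake_map(void *addr, size_t len)
{
        (void)len;
        return (unsigned long)addr;
}

static void fake_unmap(unsigned long map, size_t len)
{
        (void)map;
        (void)len;
}

/* Same shape as alloc_proxy_bufs(): allocate the array, then allocate and
 * map each entry; on failure unwind every entry that was already set up. */
static struct rcv_buf *alloc_bufs(int n, size_t len)
{
        struct rcv_buf *b = calloc(n, sizeof(*b));
        int i;

        if (!b)
                return NULL;
        for (i = 0; i < n; i++) {
                b[i].addr = malloc(len);
                if (!b[i].addr)
                        goto err;
                b[i].map = fake_map(b[i].addr, len);
                if (!b[i].map) {
                        free(b[i].addr);
                        goto err;
                }
        }
        return b;

err:
        while (i > 0) {
                --i;
                fake_unmap(b[i].map, len);
                free(b[i].addr);
        }
        free(b);
        return NULL;
}

int main(void)
{
        int n = 16;
        struct rcv_buf *b = alloc_bufs(n, 64);

        if (!b)
                return 1;
        /* Teardown mirrors free_proxy_bufs(): unmap, then free, each entry. */
        for (int i = 0; i < n; i++) {
                fake_unmap(b[i].map, 64);
                free(b[i].addr);
        }
        free(b);
        printf("allocated and released %d proxy receive buffers\n", n);
        return 0;
}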
541 struct mlx4_ib_qp *qp)
543 mutex_lock(&dev->counters_table[qp->port - 1].mutex);
544 mlx4_counter_free(dev->dev, qp->counter_index->index);
545 list_del(&qp->counter_index->list);
546 mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
548 kfree(qp->counter_index);
549 qp->counter_index = NULL;
657 struct mlx4_ib_qp *qp)
662 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
664 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
668 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
672 INIT_LIST_HEAD(&qp->gid_list);
673 INIT_LIST_HEAD(&qp->steering_rules);
675 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
676 qp->state = IB_QPS_RESET;
679 qp->sq_no_prefetch = 1;
680 qp->sq.wqe_cnt = 1;
681 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
682 qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
683 qp->mtt = (to_mqp(
686 qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL);
687 if (!qp->rss_ctx) {
692 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
699 kfree(qp->rss_ctx);
702 mlx4_qp_remove(dev->dev, &qp->mqp);
703 mlx4_qp_free(dev->dev, &qp->mqp);
710 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp,
767 qp->pri.vid = 0xFFFF;
768 qp->alt.vid = 0xFFFF;
770 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
774 qp->ibqp.qp_num = qp->mqp.qpn;
784 struct mlx4_ib_qp *qp, int range_size, int *wqn)
804 qp->mqp.usage);
821 qp->wqn_range = range;
834 struct mlx4_ib_qp *qp, bool dirty_release)
841 range = qp->wqn_range;
862 struct ib_udata *udata, struct mlx4_ib_qp *qp)
877 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
879 spin_lock_init(&qp->sq.lock);
880 spin_lock_init(&qp->rq.lock);
881 INIT_LIST_HEAD(&qp->gid_list);
882 INIT_LIST_HEAD(&qp->steering_rules);
884 qp->state = IB_QPS_RESET;
909 qp->flags |= MLX4_IB_QP_SCATTER_FCS;
911 err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz);
915 qp->sq_no_prefetch = 1;
916 qp->sq.wqe_cnt = 1;
917 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
918 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
919 (qp->sq.wqe_cnt << qp->sq.wqe_shift);
921 qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0);
922 if (IS_ERR(qp->umem)) {
923 err = PTR_ERR(qp->umem);
927 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
933 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
937 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
941 err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
944 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
946 err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn);
950 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
959 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
961 qp->mqp.event = mlx4_ib_wq_event;
969 list_add_tail(&qp->qps_list, &dev->qp_list);
974 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
976 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
983 mlx4_ib_release_wqn(context, qp, 0);
985 mlx4_ib_db_unmap_user(context, &qp->db);
988 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
990 ib_umem_release(qp->umem);
997 struct mlx4_ib_qp *qp)
1008 /* When tunneling special qps, we use a plain UD qp */
1054 qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
1055 if (!qp->sqp)
1059 qp->mlx4_ib_qp_type = qp_type;
1061 spin_lock_init(&qp->sq.lock);
1062 spin_lock_init(&qp->rq.lock);
1063 INIT_LIST_HEAD(&qp->gid_list);
1064 INIT_LIST_HEAD(&qp->steering_rules);
1066 qp->state = IB_QPS_RESET;
1068 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
1083 qp->inl_recv_sz = ucmd.inl_recv_sz;
1093 qp->flags |= MLX4_IB_QP_SCATTER_FCS;
1097 qp_has_rq(init_attr), qp, qp->inl_recv_sz);
1101 qp->sq_no_prefetch = ucmd.sq_no_prefetch;
1103 err = set_user_sq_size(dev, qp, &ucmd);
1107 qp->umem =
1108 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
1109 if (IS_ERR(qp->umem)) {
1110 err = PTR_ERR(qp->umem);
1114 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
1120 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);
1124 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);
1129 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
1133 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
1136 qp_has_rq(init_attr), qp, 0);
1140 qp->sq_no_prefetch = 0;
1143 qp->flags |= MLX4_IB_QP_LSO;
1148 qp->flags |= MLX4_IB_QP_NETIF;
1155 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
1160 err = mlx4_db_alloc(dev->dev, &qp->db, 0);
1164 *qp->db.db = 0;
1167 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2,
1168 &qp->buf)) {
1173 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
1174 &qp->mtt);
1178 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
1182 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
1184 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
1186 if (!qp->sq.wrid || !qp->rq.wrid) {
1190 qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
1194 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1196 if (alloc_proxy_bufs(pd->device, qp)) {
1211 qp->mqp.usage);
1213 if (qp->flags & MLX4_IB_QP_NETIF)
1217 &qpn, 0, qp->mqp.usage);
1223 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1225 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
1230 qp->mqp.qpn |= (1 << 23);
1237 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
1239 qp->mqp.event = mlx4_ib_qp_event;
1247 list_add_tail(&qp->qps_list, &dev->qp_list);
1252 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
1254 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
1262 if (qp->flags & MLX4_IB_QP_NETIF)
1268 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
1269 free_proxy_bufs(pd->device, qp);
1273 mlx4_ib_db_unmap_user(context, &qp->db);
1275 kvfree(qp->sq.wrid);
1276 kvfree(qp->rq.wrid);
1280 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1283 if (!qp->umem)
1284 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1285 ib_umem_release(qp->umem);
1289 mlx4_db_free(dev->dev, &qp->db);
1292 kfree(qp->sqp);
1340 static void del_gid_entries(struct mlx4_ib_qp *qp)
1344 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1350 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp)
1352 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT)
1353 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd);
1355 return to_mpd(qp->ibqp.pd);
1358 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src,
1361 switch (qp->ibqp.qp_type) {
1363 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
1367 *send_cq = to_mcq(qp->ibqp.send_cq);
1371 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
1372 to_mcq(qp->ibwq.cq);
1373 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) :
1379 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1381 if (qp->state != IB_QPS_RESET) {
1384 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size);
1386 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i];
1396 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1397 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1399 qp->mqp.qpn);
1402 mlx4_qp_remove(dev->dev, &qp->mqp);
1403 mlx4_qp_free(dev->dev, &qp->mqp);
1404 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1405 del_gid_entries(qp);
1408 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
1415 if (qp->state != IB_QPS_RESET) {
1416 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
1417 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
1419 qp->mqp.qpn);
1420 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
1421 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
1422 qp->pri.smac = 0;
1423 qp->pri.smac_port = 0;
1425 if (qp->alt.smac) {
1426 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
1427 qp->alt.smac = 0;
1429 if (qp->pri.vid < 0x1000) {
1430 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
1431 qp->pri.vid = 0xFFFF;
1432 qp->pri.candidate_vid = 0xFFFF;
1433 qp->pri.update_vid = 0;
1435 if (qp->alt.vid < 0x1000) {
1436 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
1437 qp->alt.vid = 0xFFFF;
1438 qp->alt.candidate_vid = 0xFFFF;
1439 qp->alt.update_vid = 0;
1443 get_cqs(qp, src, &send_cq, &recv_cq);
1449 list_del(&qp->qps_list);
1450 list_del(&qp->cq_send_list);
1451 list_del(&qp->cq_recv_list);
1453 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1454 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
1456 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1459 mlx4_qp_remove(dev->dev, &qp->mqp);
1464 mlx4_qp_free(dev->dev, &qp->mqp);
1466 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) {
1467 if (qp->flags & MLX4_IB_QP_NETIF)
1468 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
1475 qp, 1);
1477 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
1480 mlx4_mtt_cleanup(dev->dev, &qp->mtt);
1483 if (qp->rq.wqe_cnt) {
1490 mlx4_ib_db_unmap_user(mcontext, &qp->db);
1493 kvfree(qp->sq.wrid);
1494 kvfree(qp->rq.wrid);
1495 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
1497 free_proxy_bufs(&dev->ib_dev, qp);
1498 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
1499 if (qp->rq.wqe_cnt)
1500 mlx4_db_free(dev->dev, &qp->db);
1502 ib_umem_release(qp->umem);
1504 del_gid_entries(qp);
1524 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp,
1533 return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata);
1582 qp->pri.vid = 0xFFFF;
1583 qp->alt.vid = 0xFFFF;
1584 err = create_qp_common(pd, init_attr, udata, 0, qp);
1588 qp->ibqp.qp_num = qp->mqp.qpn;
1589 qp->xrcdn = xrcdn;
1607 qp->pri.vid = 0xFFFF;
1608 qp->alt.vid = 0xFFFF;
1609 err = create_qp_common(pd, init_attr, udata, sqpn, qp);
1616 rdma_restrack_no_track(&qp->ibqp.res);
1618 qp->port = init_attr->port_num;
1619 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 :
1635 struct mlx4_ib_qp *qp = to_mqp(ibqp);
1639 mutex_init(&qp->mutex);
1640 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata);
1646 struct mlx4_ib_sqp *sqp = qp->sqp;
1668 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1670 struct mlx4_ib_dev *dev = to_mdev(qp->device);
1671 struct mlx4_ib_qp *mqp = to_mqp(qp);
1686 if (qp->rwq_ind_tbl) {
1696 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
1698 struct mlx4_ib_qp *mqp = to_mqp(qp);
1707 return _mlx4_ib_destroy_qp(qp, udata);
1734 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
1744 dest_rd_atomic = qp->resp_depth;
1749 access_flags = qp->atomic_rd_en;
1847 /* no current vlan tag in qp */
1860 /* have current vlan tag. unregister it at modify-qp success */
1898 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
1904 return _mlx4_set_path(dev, &qp->ah_attr,
1911 const struct ib_qp_attr *qp,
1916 return _mlx4_set_path(dev, &qp->alt_ah_attr,
1922 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1926 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1927 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) {
1929 ge->port = qp->port;
1935 struct mlx4_ib_qp *qp,
1941 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
1943 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
1944 if (!qp->pri.smac && !qp->pri.smac_port) {
1945 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
1947 qp->pri.candidate_smac_index = smac_index;
1948 qp->pri.candidate_smac = u64_mac;
1949 qp->pri.candidate_smac_port = qp->port;
1958 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
1964 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
1966 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) ||
1982 qp->counter_index = new_counter_index;
1984 mutex_lock(&dev->counters_table[qp->port - 1].mutex);
1986 &dev->counters_table[qp->port - 1].counters_list);
1987 mutex_unlock(&dev->counters_table[qp->port - 1].mutex);
2096 struct mlx4_ib_qp *qp)
2103 rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz);
2105 cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff);
2106 if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6))
2108 rss_context->flags = qp->rss_ctx->flags;
2112 memcpy(rss_context->rss_key, qp->rss_ctx->rss_key,
2127 struct mlx4_ib_qp *qp;
2146 qp = to_mqp((struct ib_qp *)ibwq);
2156 qp = to_mqp(ibqp);
2158 pd = get_pd(qp);
2163 rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
2172 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16));
2191 if (qp->inl_recv_sz)
2194 if (qp->flags & MLX4_IB_QP_SCATTER_FCS)
2202 if (qp->flags & MLX4_IB_QP_LSO)
2218 if (qp->rq.wqe_cnt)
2219 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
2220 context->rq_size_stride |= qp->rq.wqe_shift - 4;
2223 if (qp->sq.wqe_cnt)
2224 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
2225 context->sq_size_stride |= qp->sq.wqe_shift - 4;
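The rq_size_stride/sq_size_stride lines above pack each ring's geometry into one byte: log2(number of WQEs) shifted left by 3, OR'ed with (wqe_shift - 4) in the low bits. A tiny sketch of that encoding; size_stride() is a name local to the sketch and the values are examples only.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the listed context setup: (log2(entries) << 3) | (log2(stride) - 4). */
static uint8_t size_stride(uint32_t wqe_cnt, uint32_t wqe_shift)
{
        uint32_t log_cnt = 0, n = wqe_cnt;
        uint8_t v = 0;

        while (n >>= 1)
                log_cnt++;              /* ilog2() of the power-of-two count */
        if (wqe_cnt)
                v = (uint8_t)(log_cnt << 3);
        v |= (uint8_t)(wqe_shift - 4);  /* stride is 2^wqe_shift bytes */
        return v;
}

int main(void)
{
        /* 128 entries of 2^5 = 32 bytes each -> (7 << 3) | 1 = 0x39 */
        printf("rq_size_stride = 0x%02x\n", size_stride(128, 5));
        /* 256 entries of 2^6 = 64 bytes each -> (8 << 3) | 2 = 0x42 */
        printf("sq_size_stride = 0x%02x\n", size_stride(256, 6));
        return 0;
}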
2227 if (new_state == IB_QPS_RESET && qp->counter_index)
2228 mlx4_ib_free_qp_counter(dev, qp);
2231 context->sq_size_stride |= !!qp->sq_no_prefetch << 7;
2232 context->xrcd = cpu_to_be32((u32) qp->xrcdn);
2256 err = create_qp_lb_counter(dev, qp);
2261 dev->counters_table[qp->port - 1].default_counter;
2262 if (qp->counter_index)
2263 counter_index = qp->counter_index->index;
2268 if (qp->counter_index) {
2278 if (qp->flags & MLX4_IB_QP_NETIF) {
2279 mlx4_ib_steer_qp_reg(dev, qp, 1);
2284 enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ?
2293 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
2301 attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2316 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
2350 if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
2364 get_cqs(qp, src_type, &send_cq, &recv_cq);
2404 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask);
2418 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */
2420 if (qp->mlx4_ib_qp_type &
2425 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) &&
2443 if (qp->rq.wqe_cnt &&
2446 context->db_rec_addr = cpu_to_be64(qp->db.dma);
2452 context->pri_path.sched_queue = (qp->port - 1) << 6;
2453 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
2454 qp->mlx4_ib_qp_type &
2457 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI)
2460 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
2464 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
2466 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
2467 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
2470 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
2471 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
2472 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
2473 err = handle_eth_ud_smac_index(dev, qp, context);
2478 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
2479 dev->qp1_proxy[qp->port - 1] = qp;
2496 &dev->ib_dev, qp->port) ==
2527 for (i = 0; i < qp->sq.wqe_cnt; ++i) {
2528 ctrl = get_send_wqe(qp, i);
2531 1 << (qp->sq.wqe_shift - 4);
2532 stamp_send_wqe(qp, i);
2539 fill_qp_rss_context(context, qp);
2543 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),
2545 sqd_event, &qp->mqp);
2549 qp->state = new_state;
2552 qp->atomic_rd_en = attr->qp_access_flags;
2554 qp->resp_depth = attr->max_dest_rd_atomic;
2556 qp->port = attr->port_num;
2557 update_mcg_macs(dev, qp);
2560 qp->alt_port = attr->alt_port_num;
2562 if (is_sqp(dev, qp))
2563 store_sqp_attrs(qp->sqp, attr, attr_mask);
2569 if (is_qp0(dev, qp)) {
2571 if (mlx4_INIT_PORT(dev->dev, qp->port))
2573 qp->port);
2577 mlx4_CLOSE_PORT(dev->dev, qp->port);
2586 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2589 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
2591 qp->rq.head = 0;
2592 qp->rq.tail = 0;
2593 qp->sq.head = 0;
2594 qp->sq.tail = 0;
2595 qp->sq_next_wqe = 0;
2596 if (qp->rq.wqe_cnt)
2597 *qp->db.db = 0;
2599 if (qp->flags & MLX4_IB_QP_NETIF)
2600 mlx4_ib_steer_qp_reg(dev, qp, 0);
2602 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
2603 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2604 qp->pri.smac = 0;
2605 qp->pri.smac_port = 0;
2607 if (qp->alt.smac) {
2608 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2609 qp->alt.smac = 0;
2611 if (qp->pri.vid < 0x1000) {
2612 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
2613 qp->pri.vid = 0xFFFF;
2614 qp->pri.candidate_vid = 0xFFFF;
2615 qp->pri.update_vid = 0;
2618 if (qp->alt.vid < 0x1000) {
2619 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
2620 qp->alt.vid = 0xFFFF;
2621 qp->alt.candidate_vid = 0xFFFF;
2622 qp->alt.update_vid = 0;
2626 if (err && qp->counter_index)
2627 mlx4_ib_free_qp_counter(dev, qp);
2629 mlx4_ib_steer_qp_reg(dev, qp, 0);
2631 if (qp->pri.candidate_smac ||
2632 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
2634 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
2636 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
2637 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
2638 qp->pri.smac = qp->pri.candidate_smac;
2639 qp->pri.smac_index = qp->pri.candidate_smac_index;
2640 qp->pri.smac_port = qp->pri.candidate_smac_port;
2642 qp->pri.candidate_smac = 0;
2643 qp->pri.candidate_smac_index = 0;
2644 qp->pri.candidate_smac_port = 0;
2646 if (qp->alt.candidate_smac) {
2648 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
2650 if (qp->alt.smac)
2651 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
2652 qp->alt.smac = qp->alt.candidate_smac;
2653 qp->alt.smac_index = qp->alt.candidate_smac_index;
2654 qp->alt.smac_port = qp->alt.candidate_smac_port;
2656 qp->alt.candidate_smac = 0;
2657 qp->alt.candidate_smac_index = 0;
2658 qp->alt.candidate_smac_port = 0;
2661 if (qp->pri.update_vid) {
2663 if (qp->pri.candidate_vid < 0x1000)
2664 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
2665 qp->pri.candidate_vid);
2667 if (qp->pri.vid < 0x1000)
2668 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
2669 qp->pri.vid);
2670 qp->pri.vid = qp->pri.candidate_vid;
2671 qp->pri.vlan_port = qp->pri.candidate_vlan_port;
2672 qp->pri.vlan_index = qp->pri.candidate_vlan_index;
2674 qp->pri.candidate_vid = 0xFFFF;
2675 qp->pri.update_vid = 0;
2678 if (qp->alt.update_vid) {
2680 if (qp->alt.candidate_vid < 0x1000)
2681 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
2682 qp->alt.candidate_vid);
2684 if (qp->alt.vid < 0x1000)
2685 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
2686 qp->alt.vid);
2687 qp->alt.vid = qp->alt.candidate_vid;
2688 qp->alt.vlan_port = qp->alt.candidate_vlan_port;
2689 qp->alt.vlan_index = qp->alt.candidate_vlan_index;
2691 qp->alt.candidate_vid = 0xFFFF;
2692 qp->alt.update_vid = 0;
2707 struct mlx4_ib_qp *qp = to_mqp(ibqp);
2710 mutex_lock(&qp->mutex);
2712 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
2777 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
2827 mutex_unlock(&qp->mutex);
2868 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp,
2872 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device);
2873 struct mlx4_ib_sqp *sqp = qp->sqp;
2874 struct ib_device *ib_dev = qp->ibqp.device;
2896 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER)
2901 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) {
2918 err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey);
2922 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
2926 cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel);
2930 if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey))
2933 if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey))
2937 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn);
3021 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr,
3024 struct mlx4_ib_sqp *sqp = qp->sqp;
3025 struct ib_device *ib_dev = qp->ibqp.device;
3049 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET;
3063 err = fill_gid_by_hw_index(ibdev, qp->port,
3115 .demux[qp->port - 1]
3120 ->sriov.demux[qp->port - 1]
3154 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) |
3203 !qp->ibqp.qp_num ?
3207 qp->port);
3208 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15)
3214 if (!qp->ibqp.qp_num)
3215 err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index,
3218 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index,
3228 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
3489 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp,
3497 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
3498 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
3533 struct mlx4_ib_qp *qp = to_mqp(ibqp);
3550 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
3551 struct mlx4_ib_sqp *sqp = qp->sqp;
3558 if (!fill_gid_by_hw_index(mdev, qp->port,
3561 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
3562 to_mqp(sqp->roce_v2_gsi) : qp;
3569 spin_lock_irqsave(&qp->sq.lock, flags);
3578 ind = qp->sq_next_wqe;
3584 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
3590 if (unlikely(wr->num_sge > qp->sq.max_gs)) {
3596 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
3597 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
3607 qp->sq_signal_bits;
3614 switch (qp->mlx4_ib_qp_type) {
3678 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
3689 /* this is a UD qp used in MAD responses to slaves. */
3702 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen,
3715 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl,
3733 /* If we are tunneling special qps, this is a UD qp.
3735 * the tunnel qp, and then add a header with address
3739 qp->mlx4_ib_qp_type);
3749 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen);
3774 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI ||
3775 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI ||
3776 qp->mlx4_ib_qp_type &
3810 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
3818 stamp_send_wqe(qp, ind + qp->sq_spare_wqes);
3824 qp->sq.head += nreq;
3832 writel_relaxed(qp->doorbell_qpn,
3835 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
3837 qp->sq_next_wqe = ind;
3840 spin_unlock_irqrestore(&qp->sq.lock, flags);
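In the post-send lines above, the free-running index ind (saved back into qp->sq_next_wqe) is masked with (wqe_cnt - 1) to pick a slot, while (ind & qp->sq.wqe_cnt) selects the top bit OR'ed into the control word, so that bit toggles once per complete pass over the ring, the usual role of a ring ownership/cycle bit. A minimal sketch of that toggling; SQ_WQE_CNT and owner_bit() are local to the sketch, and the big-endian conversion in the real line is omitted.

#include <stdint.h>
#include <stdio.h>

#define SQ_WQE_CNT 8                    /* power of two, like qp->sq.wqe_cnt */

/* Same test as the listed line: the bit is set whenever the free-running
 * index has the wqe_cnt bit set, so it toggles once per pass over the ring. */
static uint32_t owner_bit(uint32_t ind)
{
        return (ind & SQ_WQE_CNT) ? (1u << 31) : 0;
}

int main(void)
{
        for (uint32_t ind = 0; ind < 3 * SQ_WQE_CNT; ind++)
                printf("ind=%2u slot=%u owner=%u\n",
                       ind, ind & (SQ_WQE_CNT - 1), owner_bit(ind) ? 1 : 0);
        return 0;
}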
3854 struct mlx4_ib_qp *qp = to_mqp(ibqp);
3864 max_gs = qp->rq.max_gs;
3865 spin_lock_irqsave(&qp->rq.lock, flags);
3875 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
3878 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
3884 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
3890 scat = get_recv_wqe(qp, ind);
3892 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER |
3895 qp->sqp_proxy_rcv[ind].map,
3902 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map);
3916 qp->rq.wrid[ind] = wr->wr_id;
3918 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
3923 qp->rq.head += nreq;
3931 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
3934 spin_unlock_irqrestore(&qp->rq.lock, flags);
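The post-receive lines above show the ring bookkeeping: qp->rq.head is a free-running counter, the slot index is the head masked by (wqe_cnt - 1), the wr_id is remembered per slot, and the low 16 bits of the new head are written to the doorbell record. A minimal userspace sketch of just that bookkeeping, done here per post rather than per batch; locking, overflow checks, DMA, the scatter list and the big-endian doorbell conversion are omitted, and all names are local to the sketch.

#include <stdint.h>
#include <stdio.h>

#define RQ_WQE_CNT 8                    /* power of two, like qp->rq.wqe_cnt */

struct rq_sketch {
        uint32_t head;                  /* free-running producer count */
        uint64_t wrid[RQ_WQE_CNT];      /* wr_id per slot, like qp->rq.wrid */
        uint32_t db;                    /* stand-in for *qp->db.db */
};

/* Post one receive: pick the slot from the masked head, remember the wr_id,
 * then publish the low 16 bits of the new head to the doorbell record. */
static void post_recv(struct rq_sketch *rq, uint64_t wr_id)
{
        uint32_t ind = rq->head & (RQ_WQE_CNT - 1);

        rq->wrid[ind] = wr_id;
        rq->head++;
        rq->db = rq->head & 0xffff;
}

int main(void)
{
        struct rq_sketch rq = { 0 };

        for (uint64_t id = 1; id <= 10; id++)
                post_recv(&rq, id);
        /* After 10 posts: head = 10, next slot = 10 & 7 = 2, doorbell = 10. */
        printf("head=%u next slot=%u db=%u\n",
               rq.head, rq.head & (RQ_WQE_CNT - 1), rq.db);
        return 0;
}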
4022 struct mlx4_ib_qp *qp = to_mqp(ibqp);
4030 mutex_lock(&qp->mutex);
4032 if (qp->state == IB_QPS_RESET) {
4037 err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
4045 qp->state = to_ib_qp_state(mlx4_state);
4046 qp_attr->qp_state = qp->state;
4057 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC ||
4058 qp->ibqp.qp_type == IB_QPT_XRC_INI ||
4059 qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
4069 qp_attr->port_num = qp->port;
4073 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
4089 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
4090 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
4093 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
4094 qp_attr->cap.max_send_sge = qp->sq.max_gs;
4109 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)
4112 if (qp->flags & MLX4_IB_QP_LSO)
4115 if (qp->flags & MLX4_IB_QP_NETIF)
4119 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ?
4123 mutex_unlock(&qp->mutex);
4133 struct mlx4_ib_qp *qp;
4169 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
4170 if (!qp)
4173 mutex_init(&qp->mutex);
4174 qp->pri.vid = 0xFFFF;
4175 qp->alt.vid = 0xFFFF;
4187 err = create_rq(pd, &ib_qp_init_attr, udata, qp);
4189 kfree(qp);
4193 qp->ibwq.event_handler = init_attr->event_handler;
4194 qp->ibwq.wq_num = qp->mqp.qpn;
4195 qp->ibwq.state = IB_WQS_RESET;
4197 return &qp->ibwq;
4215 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4224 qp_cur_state = qp->state;
4233 attr.port_num = qp->port;
4263 qp->state = qp_new_state;
4271 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4308 mutex_lock(&qp->mutex);
4313 if (qp->rss_usecnt)
4319 mutex_unlock(&qp->mutex);
4327 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
4329 if (qp->counter_index)
4330 mlx4_ib_free_qp_counter(dev, qp);
4332 destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
4334 kfree(qp);
4454 void mlx4_ib_drain_sq(struct ib_qp *qp)
4456 struct ib_cq *cq = qp->send_cq;
4468 struct mlx4_ib_dev *dev = to_mdev(qp->device);
4471 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
4480 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true);
4489 void mlx4_ib_drain_rq(struct ib_qp *qp)
4491 struct ib_cq *cq = qp->recv_cq;
4497 struct mlx4_ib_dev *dev = to_mdev(qp->device);
4500 ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
4510 ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true);