Lines matching full-text search for "qp"
100 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) in get_wqe() argument
102 return mlx5_buf_offset(&qp->buf, offset); in get_wqe()
105 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) in get_recv_wqe() argument
107 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
110 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) in mlx5_get_send_wqe() argument
112 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); in mlx5_get_send_wqe()
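The three accessors above share one pattern: a WQE address is the QP buffer base plus the queue's byte offset plus the entry index shifted by the log2 of the stride (MLX5_IB_SQ_STRIDE for the send queue, rq.wqe_shift for the receive queue). A minimal sketch of that arithmetic, with a made-up helper name and parameters used purely for illustration:

    /* Illustration only: entry n of a queue whose stride is a power of two
     * (1 << wqe_shift bytes) starts at queue_offset + (n << wqe_shift)
     * bytes from the start of the QP buffer. */
    static inline void *wq_entry(void *qp_buf, size_t queue_offset,
                                 unsigned int n, unsigned int wqe_shift)
    {
            return (char *)qp_buf + queue_offset + ((size_t)n << wqe_shift);
    }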
118 * @qp: QP to copy from.
132 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, in mlx5_ib_read_user_wqe() argument
136 struct ib_device *ibdev = qp->ibqp.device; in mlx5_ib_read_user_wqe()
138 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe()
147 mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n", in mlx5_ib_read_user_wqe()
148 qp->ibqp.qp_type); in mlx5_ib_read_user_wqe()
187 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) in mlx5_ib_qp_event() argument
189 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx5_ib_qp_event()
194 to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port; in mlx5_ib_qp_event()
199 event.element.qp = ibqp; in mlx5_ib_qp_event()
226 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); in mlx5_ib_qp_event()
235 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
245 qp->rq.max_gs = 0; in set_rq_size()
246 qp->rq.wqe_cnt = 0; in set_rq_size()
247 qp->rq.wqe_shift = 0; in set_rq_size()
252 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
253 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
254 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
255 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
257 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; in set_rq_size()
262 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
270 qp->rq.wqe_shift = ilog2(wqe_size); in set_rq_size()
271 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
272 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
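As the hits above show, set_rq_size() takes the receive-queue geometry either from the user command (rq_wqe_count/rq_wqe_shift) or, for kernel QPs, from the requested capabilities; in both paths max_gs is the number of scatter entries that fit in one stride (1 << wqe_shift), minus one slot when a per-WQE signature segment (wq_sig) is in use. A hedged sketch of that calculation, with an illustrative function name and the assumption that the stride has already been rounded to a power of two:

    /* Illustrative only: mirrors the max_gs arithmetic in the hits above.
     * Each scatter entry is a struct mlx5_wqe_data_seg, 16 bytes. */
    unsigned int rq_max_gs(unsigned int stride, int wq_sig)
    {
            return stride / 16 - (wq_sig ? 1 : 0);
    }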
375 struct mlx5_ib_qp *qp) in calc_sq_size() argument
394 qp->max_inline_data = wqe_size - sq_overhead(attr) - in calc_sq_size()
396 attr->cap.max_inline_data = qp->max_inline_data; in calc_sq_size()
399 qp->signature_en = true; in calc_sq_size()
402 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; in calc_sq_size()
403 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
405 qp->sq.wqe_cnt, in calc_sq_size()
409 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in calc_sq_size()
410 qp->sq.max_gs = get_send_sge(attr, wqe_size); in calc_sq_size()
411 if (qp->sq.max_gs < attr->cap.max_send_sge) in calc_sq_size()
414 attr->cap.max_send_sge = qp->sq.max_gs; in calc_sq_size()
415 qp->sq.max_post = wq_size / wqe_size; in calc_sq_size()
416 attr->cap.max_send_wr = qp->sq.max_post; in calc_sq_size()
422 struct mlx5_ib_qp *qp, in set_user_buf_size() argument
427 int desc_sz = 1 << qp->sq.wqe_shift; in set_user_buf_size()
441 qp->sq.wqe_cnt = ucmd->sq_wqe_count; in set_user_buf_size()
443 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in set_user_buf_size()
445 qp->sq.wqe_cnt, in set_user_buf_size()
451 base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_user_buf_size()
452 qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6; in set_user_buf_size()
454 base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_buf_size()
455 (qp->sq.wqe_cnt << 6); in set_user_buf_size()
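set_user_buf_size() places the receive queue at the start of the user buffer and the send queue after it; the shift by 6 reflects the 64-byte send basic block (MLX5_SEND_WQE_BB). For raw packet QPs the SQ gets its own ubuffer, so the base buffer holds only the RQ. A small sketch of the combined size for the ordinary case, names illustrative:

    /* Illustrative only: RQ bytes plus SQ bytes, SQ counted in 64-byte BBs. */
    size_t qp_ubuf_size(unsigned int rq_wqe_cnt, unsigned int rq_wqe_shift,
                        unsigned int sq_wqe_cnt)
    {
            return ((size_t)rq_wqe_cnt << rq_wqe_shift) +
                   ((size_t)sq_wqe_cnt << 6 /* MLX5_SEND_WQE_BB == 64 */);
    }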
761 struct mlx5_ib_qp *qp, struct ib_udata *udata, in create_user_qp() argument
804 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) in create_user_qp()
819 qp->rq.offset = 0; in create_user_qp()
820 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in create_user_qp()
821 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_user_qp()
823 err = set_user_buf_size(dev, qp, &ucmd, base, attr); in create_user_qp()
864 qp->bfregn = bfregn; in create_user_qp()
866 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); in create_user_qp()
877 qp->create_type = MLX5_QP_USER; in create_user_qp()
882 mlx5_ib_db_unmap_user(context, &qp->db); in create_user_qp()
897 static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, in destroy_qp_user() argument
907 mlx5_ib_db_unmap_user(context, &qp->db); in destroy_qp_user()
915 if (qp->bfregn != MLX5_IB_INVALID_BFREG) in destroy_qp_user()
916 mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); in destroy_qp_user()
921 struct mlx5_ib_qp *qp, in create_kernel_qp() argument
936 spin_lock_init(&qp->bf.lock32); in create_kernel_qp()
939 qp->bf.bfreg = &dev->fp_bfreg; in create_kernel_qp()
941 qp->bf.bfreg = &dev->wc_bfreg; in create_kernel_qp()
943 qp->bf.bfreg = &dev->bfreg; in create_kernel_qp()
948 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; in create_kernel_qp()
949 uar_index = qp->bf.bfreg->index; in create_kernel_qp()
951 err = calc_sq_size(dev, init_attr, qp); in create_kernel_qp()
957 qp->rq.offset = 0; in create_kernel_qp()
958 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_kernel_qp()
959 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); in create_kernel_qp()
962 2 * PAGE_SIZE, &qp->buf); in create_kernel_qp()
968 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); in create_kernel_qp()
970 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; in create_kernel_qp()
980 MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in create_kernel_qp()
988 qp->flags |= MLX5_IB_QP_SQPN_QP1; in create_kernel_qp()
991 mlx5_fill_page_array(&qp->buf, in create_kernel_qp()
994 err = mlx5_db_alloc(dev->mdev, &qp->db); in create_kernel_qp()
1000 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); in create_kernel_qp()
1001 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); in create_kernel_qp()
1002 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); in create_kernel_qp()
1003 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); in create_kernel_qp()
1004 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); in create_kernel_qp()
1006 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || in create_kernel_qp()
1007 !qp->sq.w_list || !qp->sq.wqe_head) { in create_kernel_qp()
1011 qp->create_type = MLX5_QP_KERNEL; in create_kernel_qp()
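A side note on the five kmalloc() calls above: each computes wqe_cnt * sizeof(element) by hand. Where available, kcalloc() is the idiomatic, overflow-checked way to express the same allocation; shown here only as a hedged alternative for one of the arrays, not as a claim about what the driver currently does:

    /* Illustrative only: count/size form with built-in overflow checking. */
    qp->sq.wrid = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL);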
1016 kfree(qp->sq.wqe_head); in create_kernel_qp()
1017 kfree(qp->sq.w_list); in create_kernel_qp()
1018 kfree(qp->sq.wrid); in create_kernel_qp()
1019 kfree(qp->sq.wr_data); in create_kernel_qp()
1020 kfree(qp->rq.wrid); in create_kernel_qp()
1021 mlx5_db_free(dev->mdev, &qp->db); in create_kernel_qp()
1027 mlx5_buf_free(dev->mdev, &qp->buf); in create_kernel_qp()
1031 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) in destroy_qp_kernel() argument
1033 kfree(qp->sq.wqe_head); in destroy_qp_kernel()
1034 kfree(qp->sq.w_list); in destroy_qp_kernel()
1035 kfree(qp->sq.wrid); in destroy_qp_kernel()
1036 kfree(qp->sq.wr_data); in destroy_qp_kernel()
1037 kfree(qp->rq.wrid); in destroy_qp_kernel()
1038 mlx5_db_free(dev->mdev, &qp->db); in destroy_qp_kernel()
1039 mlx5_buf_free(dev->mdev, &qp->buf); in destroy_qp_kernel()
1042 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) in get_rx_type() argument
1047 else if (!qp->has_rq) in get_rx_type()
1271 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in create_raw_packet_qp() argument
1275 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in create_raw_packet_qp()
1284 if (qp->sq.wqe_cnt) { in create_raw_packet_qp()
1293 sq->base.container_mibqp = qp; in create_raw_packet_qp()
1296 if (qp->rq.wqe_cnt) { in create_raw_packet_qp()
1297 rq->base.container_mibqp = qp; in create_raw_packet_qp()
1309 qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn : in create_raw_packet_qp()
1317 if (!qp->sq.wqe_cnt) in create_raw_packet_qp()
1327 struct mlx5_ib_qp *qp) in destroy_raw_packet_qp() argument
1329 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in destroy_raw_packet_qp()
1333 if (qp->rq.wqe_cnt) { in destroy_raw_packet_qp()
1334 destroy_raw_packet_qp_tir(dev, rq, qp->ibqp.pd); in destroy_raw_packet_qp()
1338 if (qp->sq.wqe_cnt) { in destroy_raw_packet_qp()
1340 destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd); in destroy_raw_packet_qp()
1344 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, in raw_packet_qp_copy_info() argument
1350 sq->sq = &qp->sq; in raw_packet_qp_copy_info()
1351 rq->rq = &qp->rq; in raw_packet_qp_copy_info()
1352 sq->doorbell = &qp->db; in raw_packet_qp_copy_info()
1353 rq->doorbell = &qp->db; in raw_packet_qp_copy_info()
1356 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) in destroy_rss_raw_qp_tir() argument
1358 mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn, in destroy_rss_raw_qp_tir()
1359 to_mpd(qp->ibqp.pd)->uid); in destroy_rss_raw_qp_tir()
1362 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in create_rss_raw_qp_tir() argument
1523 err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn); in create_rss_raw_qp_tir()
1529 /* qpn is reserved for that QP */ in create_rss_raw_qp_tir()
1530 qp->trans_qp.base.mqp.qpn = 0; in create_rss_raw_qp_tir()
1531 qp->flags |= MLX5_IB_QP_RSS; in create_rss_raw_qp_tir()
1588 struct ib_udata *udata, struct mlx5_ib_qp *qp) in create_qp_common() argument
1605 &qp->raw_packet_qp.rq.base : in create_qp_common()
1606 &qp->trans_qp.base; in create_qp_common()
1609 mlx5_ib_odp_create_qp(qp); in create_qp_common()
1611 mutex_init(&qp->mutex); in create_qp_common()
1612 spin_lock_init(&qp->sq.lock); in create_qp_common()
1613 spin_lock_init(&qp->rq.lock); in create_qp_common()
1619 err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata); in create_qp_common()
1628 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
1641 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL; in create_qp_common()
1643 qp->flags |= MLX5_IB_QP_MANAGED_SEND; in create_qp_common()
1645 qp->flags |= MLX5_IB_QP_MANAGED_RECV; in create_qp_common()
1651 mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n"); in create_qp_common()
1665 qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS; in create_qp_common()
1669 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_qp_common()
1682 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); in create_qp_common()
1683 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); in create_qp_common()
1685 qp->wq_sig = !!wq_signature; in create_qp_common()
1688 qp->has_rq = qp_has_rq(init_attr); in create_qp_common()
1689 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, in create_qp_common()
1690 qp, (pd && pd->uobject) ? &ucmd : NULL); in create_qp_common()
1701 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || in create_qp_common()
1702 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { in create_qp_common()
1716 err = create_user_qp(dev, pd, qp, udata, init_attr, &in, in create_qp_common()
1721 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen, in create_qp_common()
1734 qp->create_type = MLX5_QP_EMPTY; in create_qp_common()
1738 qp->port = init_attr->port_num; in create_qp_common()
1751 if (qp->wq_sig) in create_qp_common()
1754 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) in create_qp_common()
1757 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) in create_qp_common()
1759 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) in create_qp_common()
1761 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) in create_qp_common()
1764 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { in create_qp_common()
1784 if (qp->rq.wqe_cnt) { in create_qp_common()
1785 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); in create_qp_common()
1786 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); in create_qp_common()
1792 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); in create_qp_common()
1794 if (qp->sq.wqe_cnt) in create_qp_common()
1795 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); in create_qp_common()
1828 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); in create_qp_common()
1834 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */ in create_qp_common()
1838 qp->flags |= MLX5_IB_QP_LSO; in create_qp_common()
1842 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; in create_qp_common()
1843 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); in create_qp_common()
1844 err = create_raw_packet_qp(dev, qp, in, pd); in create_qp_common()
1850 mlx5_ib_dbg(dev, "create qp failed\n"); in create_qp_common()
1856 base->container_mibqp = qp; in create_qp_common()
1866 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
1870 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); in create_qp_common()
1872 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); in create_qp_common()
1879 if (qp->create_type == MLX5_QP_USER) in create_qp_common()
1880 destroy_qp_user(dev, pd, qp, base, udata); in create_qp_common()
1881 else if (qp->create_type == MLX5_QP_KERNEL) in create_qp_common()
1882 destroy_qp_kernel(dev, qp); in create_qp_common()
1946 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) in get_pd() argument
1948 return to_mpd(qp->ibqp.pd); in get_pd()
1986 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1990 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in destroy_qp_common() argument
1994 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in destroy_qp_common()
1998 if (qp->ibqp.rwq_ind_tbl) { in destroy_qp_common()
1999 destroy_rss_raw_qp_tir(dev, qp); in destroy_qp_common()
2003 base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ? in destroy_qp_common()
2004 &qp->raw_packet_qp.rq.base : in destroy_qp_common()
2005 &qp->trans_qp.base; in destroy_qp_common()
2007 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
2008 if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) { in destroy_qp_common()
2009 mlx5_ib_qp_disable_pagefaults(qp); in destroy_qp_common()
2018 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); in destroy_qp_common()
2021 mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", in destroy_qp_common()
2025 get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, in destroy_qp_common()
2031 list_del(&qp->qps_list); in destroy_qp_common()
2033 list_del(&qp->cq_send_list); in destroy_qp_common()
2036 list_del(&qp->cq_recv_list); in destroy_qp_common()
2038 if (qp->create_type == MLX5_QP_KERNEL) { in destroy_qp_common()
2040 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in destroy_qp_common()
2048 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { in destroy_qp_common()
2049 destroy_raw_packet_qp(dev, qp); in destroy_qp_common()
2053 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", in destroy_qp_common()
2057 if (qp->create_type == MLX5_QP_KERNEL) in destroy_qp_common()
2058 destroy_qp_kernel(dev, qp); in destroy_qp_common()
2059 else if (qp->create_type == MLX5_QP_USER) in destroy_qp_common()
2060 destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata); in destroy_qp_common()
2090 return "Invalid QP type"; in ib_qp_type_str()
2099 struct mlx5_ib_qp *qp; in mlx5_ib_create_qp() local
2108 mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n"); in mlx5_ib_create_qp()
2111 mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n"); in mlx5_ib_create_qp()
2147 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx5_ib_create_qp()
2148 if (!qp) in mlx5_ib_create_qp()
2151 err = create_qp_common(dev, pd, init_attr, udata, qp); in mlx5_ib_create_qp()
2154 kfree(qp); in mlx5_ib_create_qp()
2159 qp->ibqp.qp_num = 0; in mlx5_ib_create_qp()
2161 qp->ibqp.qp_num = 1; in mlx5_ib_create_qp()
2163 qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; in mlx5_ib_create_qp()
2166 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, in mlx5_ib_create_qp()
2170 qp->trans_qp.xrcdn = xrcdn; in mlx5_ib_create_qp()
2181 mlx5_ib_dbg(dev, "unsupported qp type %d\n", in mlx5_ib_create_qp()
2187 return &qp->ibqp; in mlx5_ib_create_qp()
2190 int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx5_ib_destroy_qp() argument
2192 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_destroy_qp()
2193 struct mlx5_ib_qp *mqp = to_mqp(qp); in mlx5_ib_destroy_qp()
2195 if (unlikely(qp->qp_type == IB_QPT_GSI)) in mlx5_ib_destroy_qp()
2196 return mlx5_ib_gsi_destroy_qp(qp); in mlx5_ib_destroy_qp()
2205 static int to_mlx5_access_flags(struct mlx5_ib_qp *qp, in to_mlx5_access_flags() argument
2212 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); in to_mlx5_access_flags()
2217 dest_rd_atomic = qp->trans_qp.resp_depth; in to_mlx5_access_flags()
2222 access_flags = qp->trans_qp.atomic_rd_en; in to_mlx5_access_flags()
2232 atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type); in to_mlx5_access_flags()
2324 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in mlx5_set_path() argument
2390 if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) in mlx5_set_path()
2392 &qp->raw_packet_qp.sq, in mlx5_set_path()
2393 ah->sl & 0xf, qp->ibqp.pd); in mlx5_set_path()
2568 pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n", in modify_raw_packet_qp_rq()
2615 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in modify_raw_packet_qp() argument
2619 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in modify_raw_packet_qp()
2622 int modify_rq = !!qp->rq.wqe_cnt; in modify_raw_packet_qp()
2623 int modify_sq = !!qp->sq.wqe_cnt; in modify_raw_packet_qp()
2657 qp->ibqp.pd); in modify_raw_packet_qp()
2666 qp->ibqp.pd); in modify_raw_packet_qp()
2671 return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, qp->ibqp.pd); in modify_raw_packet_qp()
2719 struct mlx5_ib_qp *qp = to_mqp(ibqp); in __mlx5_ib_modify_qp() local
2720 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; in __mlx5_ib_modify_qp()
2737 mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type); in __mlx5_ib_modify_qp()
2784 context->pri_path.port = qp->port; in __mlx5_ib_modify_qp()
2790 err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, in __mlx5_ib_modify_qp()
2791 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, in __mlx5_ib_modify_qp()
2801 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, in __mlx5_ib_modify_qp()
2810 pd = get_pd(qp); in __mlx5_ib_modify_qp()
2811 get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, in __mlx5_ib_modify_qp()
2843 err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags); in __mlx5_ib_modify_qp()
2859 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx5_ib_modify_qp()
2860 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx5_ib_modify_qp()
2864 qp->port) - 1; in __mlx5_ib_modify_qp()
2873 if (qp->flags & MLX5_IB_QP_SQPN_QP1) in __mlx5_ib_modify_qp()
2883 * this QP and flush all current page faults. Otherwise a stale page in __mlx5_ib_modify_qp()
2884 * fault may attempt to work on this QP after it is reset and moved in __mlx5_ib_modify_qp()
2889 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) in __mlx5_ib_modify_qp()
2890 mlx5_ib_qp_disable_pagefaults(qp); in __mlx5_ib_modify_qp()
2900 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { in __mlx5_ib_modify_qp()
2908 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); in __mlx5_ib_modify_qp()
2918 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) in __mlx5_ib_modify_qp()
2919 mlx5_ib_qp_enable_pagefaults(qp); in __mlx5_ib_modify_qp()
2921 qp->state = new_state; in __mlx5_ib_modify_qp()
2924 qp->trans_qp.atomic_rd_en = attr->qp_access_flags; in __mlx5_ib_modify_qp()
2926 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; in __mlx5_ib_modify_qp()
2928 qp->port = attr->port_num; in __mlx5_ib_modify_qp()
2930 qp->trans_qp.alt_port = attr->alt_port_num; in __mlx5_ib_modify_qp()
2933 * If we moved a kernel QP to RESET, clean up all old CQ in __mlx5_ib_modify_qp()
2934 * entries and reinitialize the QP. in __mlx5_ib_modify_qp()
2942 qp->rq.head = 0; in __mlx5_ib_modify_qp()
2943 qp->rq.tail = 0; in __mlx5_ib_modify_qp()
2944 qp->sq.head = 0; in __mlx5_ib_modify_qp()
2945 qp->sq.tail = 0; in __mlx5_ib_modify_qp()
2946 qp->sq.cur_post = 0; in __mlx5_ib_modify_qp()
2947 qp->sq.last_poll = 0; in __mlx5_ib_modify_qp()
2948 qp->db.db[MLX5_RCV_DBR] = 0; in __mlx5_ib_modify_qp()
2949 qp->db.db[MLX5_SND_DBR] = 0; in __mlx5_ib_modify_qp()
2961 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_modify_qp() local
2976 mutex_lock(&qp->mutex); in mlx5_ib_modify_qp()
2978 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx5_ib_modify_qp()
2983 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", in mlx5_ib_modify_qp()
2997 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx5_ib_modify_qp()
3030 mutex_unlock(&qp->mutex); in mlx5_ib_modify_qp()
3061 struct mlx5_ib_qp *qp, int *size) in set_eth_seg() argument
3100 seg = mlx5_get_send_wqe(qp, 0); in set_eth_seg()
3408 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, in set_data_inl_seg() argument
3412 void *qend = qp->sq.qend; in set_data_inl_seg()
3426 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
3434 wqe = mlx5_get_send_wqe(qp, 0); in set_data_inl_seg()
3555 struct mlx5_ib_qp *qp, void **seg, int *size) in set_sig_data_segment() argument
3639 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
3640 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
3649 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
3650 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
3687 static int set_sig_umr_wr(const struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, in set_sig_umr_wr() argument
3692 u32 pdn = get_pd(qp)->pdn; in set_sig_umr_wr()
3698 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || in set_sig_umr_wr()
3720 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
3721 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
3726 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
3727 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
3729 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
3763 static int set_reg_wr(struct mlx5_ib_qp *qp, in set_reg_wr() argument
3768 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); in set_reg_wr()
3771 mlx5_ib_warn(to_mdev(qp->ibqp.device), in set_reg_wr()
3779 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
3780 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
3785 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
3786 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
3795 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) in set_linv_wr() argument
3800 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
3801 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
3805 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
3806 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
3809 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) in dump_wqe() argument
3815 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); in dump_wqe()
3818 void *buf = mlx5_get_send_wqe(qp, tidx); in dump_wqe()
3819 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
3847 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, in begin_wqe() argument
3852 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in begin_wqe()
3855 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in begin_wqe()
3856 *seg = mlx5_get_send_wqe(qp, *idx); in begin_wqe()
3860 (*ctrl)->fm_ce_se = qp->sq_signal_bits | in begin_wqe()
3872 static void finish_wqe(struct mlx5_ib_qp *qp, in finish_wqe() argument
3880 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | in finish_wqe()
3882 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); in finish_wqe()
3884 qp->fm_cache = next_fence; in finish_wqe()
3885 if (unlikely(qp->wq_sig)) in finish_wqe()
3888 qp->sq.wrid[idx] = wr_id; in finish_wqe()
3889 qp->sq.w_list[idx].opcode = mlx5_opcode; in finish_wqe()
3890 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in finish_wqe()
3891 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in finish_wqe()
3892 qp->sq.w_list[idx].next = qp->sq.cur_post; in finish_wqe()
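finish_wqe() encodes the WQE size in 16-byte units into the control segment (the low bits of qpn_ds) and then advances cur_post by however many 64-byte send basic blocks that WQE spans. The rounding step in isolation, with an illustrative helper name:

    /* Illustrative only: a WQE of `size` 16-byte units occupies this many
     * 64-byte basic blocks, so the post index advances by that amount,
     * matching DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB) above. */
    unsigned int wqe_bb_count(unsigned int size)
    {
            return (size * 16 + 64 - 1) / 64;
    }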
3902 struct mlx5_ib_qp *qp; in mlx5_ib_post_send() local
3922 qp = to_mqp(ibqp); in mlx5_ib_post_send()
3923 bf = &qp->bf; in mlx5_ib_post_send()
3924 qend = qp->sq.qend; in mlx5_ib_post_send()
3926 spin_lock_irqsave(&qp->sq.lock, flags); in mlx5_ib_post_send()
3943 fence = qp->fm_cache; in mlx5_ib_post_send()
3945 if (unlikely(num_sge > qp->sq.max_gs)) { in mlx5_ib_post_send()
3952 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq, wr->send_flags); in mlx5_ib_post_send()
3987 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; in mlx5_ib_post_send()
3989 set_linv_wr(qp, &seg, &size); in mlx5_ib_post_send()
3995 qp->sq.wr_data[idx] = IB_WR_REG_MR; in mlx5_ib_post_send()
3997 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); in mlx5_ib_post_send()
4006 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; in mlx5_ib_post_send()
4010 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
4017 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4024 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
4042 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4045 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
4064 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
4096 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4104 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4106 /* handle qp that supports ud offload */ in mlx5_ib_post_send()
4107 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { in mlx5_ib_post_send()
4115 seg = set_eth_seg(seg, wr, qend, qp, &size); in mlx5_ib_post_send()
4118 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4127 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; in mlx5_ib_post_send()
4133 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4138 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4148 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
4159 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
4170 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
4175 dump_wqe(qp, idx, size); in mlx5_ib_post_send()
4180 qp->sq.head += nreq; in mlx5_ib_post_send()
4187 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); in mlx5_ib_post_send()
4201 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx5_ib_post_send()
4214 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_post_recv() local
4228 spin_lock_irqsave(&qp->rq.lock, flags); in mlx5_ib_post_recv()
4237 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
4240 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx5_ib_post_recv()
4246 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
4252 scat = get_recv_wqe(qp, ind); in mlx5_ib_post_recv()
4253 if (qp->wq_sig) in mlx5_ib_post_recv()
4259 if (i < qp->rq.max_gs) { in mlx5_ib_post_recv()
4265 if (qp->wq_sig) { in mlx5_ib_post_recv()
4267 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); in mlx5_ib_post_recv()
4270 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
4272 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
4277 qp->rq.head += nreq; in mlx5_ib_post_recv()
4284 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx5_ib_post_recv()
4287 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx5_ib_post_recv()
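mlx5_ib_post_recv() rings the receive doorbell by writing the low 16 bits of rq.head, converted to big endian, into the doorbell record; the producer index itself wraps with a mask because wqe_cnt is a power of two. The wrap step, shown standalone with illustrative names:

    /* Illustrative only: with a power-of-two ring, masking replaces modulo. */
    unsigned int next_ring_index(unsigned int ind, unsigned int wqe_cnt)
    {
            return (ind + 1) & (wqe_cnt - 1);
    }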
4416 struct mlx5_ib_qp *qp, u8 *qp_state) in sqrq_state_to_qp_state() argument
4448 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", in sqrq_state_to_qp_state()
4449 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, in sqrq_state_to_qp_state()
4450 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); in sqrq_state_to_qp_state()
4455 *qp_state = qp->state; in sqrq_state_to_qp_state()
4461 struct mlx5_ib_qp *qp, in query_raw_packet_qp_state() argument
4464 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; in query_raw_packet_qp_state()
4471 if (qp->sq.wqe_cnt) { in query_raw_packet_qp_state()
4477 if (qp->rq.wqe_cnt) { in query_raw_packet_qp_state()
4483 return sqrq_state_to_qp_state(sq_state, rq_state, qp, in query_raw_packet_qp_state()
4487 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, in query_qp_attr() argument
4500 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, in query_qp_attr()
4510 qp->state = to_ib_qp_state(mlx5_state); in query_qp_attr()
4521 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in query_qp_attr()
4532 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ in query_qp_attr()
4555 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_query_qp() local
4574 mutex_lock(&qp->mutex); in mlx5_ib_query_qp()
4576 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { in mlx5_ib_query_qp()
4577 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state); in mlx5_ib_query_qp()
4580 qp->state = raw_packet_qp_state; in mlx5_ib_query_qp()
4583 err = query_qp_attr(dev, qp, qp_attr); in mlx5_ib_query_qp()
4588 qp_attr->qp_state = qp->state; in mlx5_ib_query_qp()
4590 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx5_ib_query_qp()
4591 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx5_ib_query_qp()
4594 qp_attr->cap.max_send_wr = qp->sq.max_post; in mlx5_ib_query_qp()
4595 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx5_ib_query_qp()
4606 qp_attr->cap.max_inline_data = qp->max_inline_data; in mlx5_ib_query_qp()
4611 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx5_ib_query_qp()
4614 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) in mlx5_ib_query_qp()
4616 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) in mlx5_ib_query_qp()
4618 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) in mlx5_ib_query_qp()
4620 if (qp->flags & MLX5_IB_QP_SQPN_QP1) in mlx5_ib_query_qp()
4623 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? in mlx5_ib_query_qp()
4627 mutex_unlock(&qp->mutex); in mlx5_ib_query_qp()