Lines Matching refs:attr

58 int mlx5_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
65 ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
73 snprintf(attr->fw_ver, sizeof attr->fw_ver,
126 struct ibv_port_attr *attr)
130 return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
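
A caller-side sketch of the two query entry points above (not part of the listing); `ctx` is a hypothetical, already-opened device context:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Query device limits and port 1 state; ibv_query_device() and
 * ibv_query_port() dispatch into mlx5_query_device()/mlx5_query_port()
 * on mlx5 hardware.  Returns 0 on success, -1 with errno set. */
static int query_example(struct ibv_context *ctx)
{
        struct ibv_device_attr dev_attr;
        struct ibv_port_attr port_attr;

        if (ibv_query_device(ctx, &dev_attr))
                return -1;
        if (ibv_query_port(ctx, 1, &port_attr))   /* ports start at 1 */
                return -1;

        printf("fw %s, max_qp %d, port state %d\n",
               dev_attr.fw_ver, dev_attr.max_qp, port_attr.state);
        return 0;
}
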
626 struct ibv_srq_init_attr *attr)
650 if (attr->attr.max_wr > ctx->max_srq_recv_wr) {
652 attr->attr.max_wr, ctx->max_srq_recv_wr);
663 if (attr->attr.max_sge > max_sge) {
665 attr->attr.max_sge, max_sge);
670 srq->max = align_queue_size(attr->attr.max_wr + 1);
671 srq->max_gs = attr->attr.max_sge;
693 attr->attr.max_sge = srq->max_gs;
695 ret = ibv_cmd_create_srq(pd, ibsrq, attr, &cmd.ibv_cmd, sizeof(cmd),
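
mlx5_create_srq() caps the requested depth and scatter list against the device limits checked at 650/663, then rounds max_wr + 1 up to a power of two (670). A minimal caller sketch, with `pd` assumed to exist:

#include <infiniband/verbs.h>

/* Create an SRQ within the limits checked above; max_wr is rounded
 * up internally via align_queue_size(max_wr + 1). */
static struct ibv_srq *make_srq(struct ibv_pd *pd)
{
        struct ibv_srq_init_attr init = {
                .attr = { .max_wr = 256, .max_sge = 1 },
        };

        return ibv_create_srq(pd, &init);   /* NULL + errno on failure */
}
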
733 struct ibv_srq_attr *attr,
738 return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
742 struct ibv_srq_attr *attr)
746 return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
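
A hedged sketch of the modify/query pair above, arming the SRQ limit event:

#include <infiniband/verbs.h>

/* Arm the SRQ limit event, then read back the attributes; these land
 * in mlx5_modify_srq()/mlx5_query_srq(). */
static int arm_srq_limit(struct ibv_srq *srq)
{
        struct ibv_srq_attr attr = { .srq_limit = 16 };

        if (ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT))
                return -1;
        return ibv_query_srq(srq, &attr);   /* attr.max_wr reflects rounding */
}
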
823 struct ibv_qp_init_attr_ex *attr,
831 size = sq_overhead(attr->qp_type);
835 if (attr->cap.max_inline_data) {
837 attr->cap.max_inline_data, 16);
840 if (attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) {
841 size += align(attr->max_tso_header, 16);
842 qp->max_tso_header = attr->max_tso_header;
847 if (attr->cap.max_send_sge > max_gather)
850 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
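
A rough restatement of the send-WQE sizing arithmetic at 831-850, assuming a 16-byte data segment (sizeof(struct mlx5_wqe_data_seg)); the helper name and the overhead parameter are hypothetical stand-ins for sq_overhead():

#include <stdint.h>

#define ALIGN_TO(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

/* Approximate send-WQE size: per-QP-type overhead, plus a 16-byte
 * aligned inline buffer (when requested), plus the optional TSO
 * header, plus one 16-byte data segment per gather entry. */
static uint32_t approx_send_wqe(uint32_t overhead, uint32_t max_inline,
                                uint32_t tso_hdr, uint32_t max_sge)
{
        uint32_t size = overhead;

        if (max_inline)
                size += ALIGN_TO(max_inline, 16);
        size += ALIGN_TO(tso_hdr, 16);
        size += max_sge * 16;   /* sizeof(struct mlx5_wqe_data_seg) */
        return size;
}
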
860 struct ibv_qp_init_attr_ex *attr,
866 if (attr->srq)
869 num_scatter = max_t(uint32_t, attr->cap.max_recv_sge, 1);
883 struct ibv_qp_init_attr_ex *attr,
890 if (!attr->cap.max_send_wr)
893 wqe_size = mlx5_calc_send_wqe(ctx, attr, qp);
904 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
906 attr->cap.max_inline_data = qp->max_inline_data;
912 if (attr->cap.max_send_wr > 0x7fffffff / ctx->max_sq_desc_sz) {
917 wq_size = mlx5_round_up_power_of_two(attr->cap.max_send_wr * wqe_size);
925 qp->sq.max_gs = attr->cap.max_send_sge;
933 struct ibv_wq_init_attr *attr)
940 if (!attr->max_wr)
944 num_scatter = max_t(uint32_t, attr->max_sge, 1);
954 wq_size = mlx5_round_up_power_of_two(attr->max_wr) * wqe_size;
966 struct ibv_qp_init_attr_ex *attr,
974 if (!attr->cap.max_recv_wr)
977 if (attr->cap.max_recv_wr > ctx->max_recv_wr) {
982 wqe_size = mlx5_calc_rcv_wqe(ctx, attr, qp);
988 wq_size = mlx5_round_up_power_of_two(attr->cap.max_recv_wr) * wqe_size;
1007 struct ibv_qp_init_attr_ex *attr,
1013 ret = mlx5_calc_sq_size(ctx, attr, qp);
1018 ret = mlx5_calc_rq_size(ctx, attr, qp);
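
All of the sizing helpers above share one pattern: per-WQE size times requested depth, rounded up to a power of two, with the 0x7fffffff check at 912 guarding the 32-bit multiply. A stand-in for mlx5_round_up_power_of_two(); the sketch uses 64-bit math instead of that guard:

#include <stdint.h>

/* Round v up to the next power of two (returns 1 for v == 0). */
static uint64_t round_up_pow2(uint64_t v)
{
        uint64_t n = 1;

        while (n < v)
                n <<= 1;
        return n;
}

/* e.g. wq_size = round_up_pow2((uint64_t)max_send_wr * wqe_size); */
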
1050 struct ibv_qp_init_attr_ex *attr,
1113 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1171 struct ibv_qp_init_attr_ex *attr,
1178 if (attr->rx_hash_conf.rx_hash_key_len > sizeof(cmd_ex_rss.rx_hash_key)) {
1183 cmd_ex_rss.rx_hash_fields_mask = attr->rx_hash_conf.rx_hash_fields_mask;
1184 cmd_ex_rss.rx_hash_function = attr->rx_hash_conf.rx_hash_function;
1185 cmd_ex_rss.rx_key_len = attr->rx_hash_conf.rx_hash_key_len;
1186 memcpy(cmd_ex_rss.rx_hash_key, attr->rx_hash_conf.rx_hash_key,
1187 attr->rx_hash_conf.rx_hash_key_len);
1190 sizeof(qp->verbs_qp), attr,
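
mlx5_cmd_create_rss_qp() bounds-checks the key length at 1178 before copying the hash configuration into the command at 1183-1187. A caller-side sketch, assuming an existing indirection table `ind_tbl` and a 40-byte `toeplitz_key` (both hypothetical):

#include <infiniband/verbs.h>

static struct ibv_qp *make_rss_qp(struct ibv_context *ctx,
                                  struct ibv_pd *pd,
                                  struct ibv_rwq_ind_table *ind_tbl,
                                  uint8_t *toeplitz_key)
{
        struct ibv_qp_init_attr_ex attr = {
                .qp_type     = IBV_QPT_RAW_PACKET,
                .comp_mask   = IBV_QP_INIT_ATTR_PD |
                               IBV_QP_INIT_ATTR_IND_TABLE |
                               IBV_QP_INIT_ATTR_RX_HASH,
                .pd          = pd,
                .rwq_ind_tbl = ind_tbl,
                .rx_hash_conf = {
                        .rx_hash_function    = IBV_RX_HASH_FUNC_TOEPLITZ,
                        .rx_hash_key_len     = 40,  /* must fit the cmd buffer */
                        .rx_hash_key         = toeplitz_key,
                        .rx_hash_fields_mask = IBV_RX_HASH_SRC_IPV4 |
                                               IBV_RX_HASH_DST_IPV4,
                },
        };

        return ibv_create_qp_ex(ctx, &attr);
}
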
1202 struct ibv_qp_init_attr_ex *attr,
1221 sizeof(qp->verbs_qp), attr,
1246 struct ibv_qp_init_attr_ex *attr)
1259 if (attr->comp_mask & ~MLX5_CREATE_QP_SUP_COMP_MASK)
1262 if ((attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) &&
1263 (attr->qp_type != IBV_QPT_RAW_PACKET))
1278 if (attr->comp_mask & IBV_QP_INIT_ATTR_RX_HASH) {
1279 ret = mlx5_cmd_create_rss_qp(context, attr, qp);
1293 ret = mlx5_calc_wq_size(ctx, attr, qp);
1299 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1308 if (mlx5_alloc_qp_buf(context, attr, qp, ret)) {
1313 if (attr->qp_type == IBV_QPT_RAW_PACKET) {
1341 cmd.sq_buf_addr = (attr->qp_type == IBV_QPT_RAW_PACKET) ?
1354 } else if (!is_xrc_tgt(attr->qp_type)) {
1364 if (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK)
1365 ret = mlx5_cmd_create_qp_ex(context, attr, &cmd, qp, &resp_ex);
1368 attr, &cmd.ibv_cmd, sizeof(cmd),
1375 uuar_index = (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK) ?
1392 if (attr->sq_sig_all)
1397 attr->cap.max_send_wr = qp->sq.max_post;
1398 attr->cap.max_recv_wr = qp->rq.max_post;
1399 attr->cap.max_recv_sge = qp->rq.max_gs;
1402 qp->rsc.rsn = (ctx->cqe_version && !is_xrc_tgt(attr->qp_type)) ?
1413 else if (!is_xrc_tgt(attr->qp_type))
1435 struct ibv_qp_init_attr *attr)
1441 memcpy(&attrx, attr, sizeof(*attr));
1446 memcpy(attr, &attrx, sizeof(*attr));
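
The legacy wrapper at 1435-1446 round-trips through ibv_qp_init_attr_ex, so plain ibv_create_qp() callers still reach the common create_qp(). A sketch, with `pd` and `cq` assumed:

#include <infiniband/verbs.h>

static struct ibv_qp *make_rc_qp(struct ibv_pd *pd, struct ibv_cq *cq)
{
        struct ibv_qp_init_attr attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .cap = {
                        .max_send_wr  = 64,
                        .max_recv_wr  = 64,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .qp_type    = IBV_QPT_RC,
                .sq_sig_all = 1,   /* see the sq_sig_all check at 1392 */
        };

        /* On return, attr.cap holds the rounded-up values written
         * back at 1397-1399. */
        return ibv_create_qp(pd, &attr);
}
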
1546 int mlx5_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
1556 ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof(cmd));
1564 attr->cap = init_attr->cap;
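
Caller-side sketch of the query path above:

#include <infiniband/verbs.h>

/* Read back QP state and capabilities; note mlx5_query_qp() copies
 * init_attr->cap into attr->cap (1564). */
static int qp_state(struct ibv_qp *qp)
{
        struct ibv_qp_attr attr;
        struct ibv_qp_init_attr init_attr;

        if (ibv_query_qp(qp, &attr, IBV_QP_STATE | IBV_QP_CAP, &init_attr))
                return -1;
        return attr.qp_state;
}
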
1573 int mlx5_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
1590 if (context->cached_link_layer[attr->port_num - 1] ==
1611 ret = ibv_cmd_modify_qp_ex(qp, attr, attr_mask,
1617 ret = ibv_cmd_modify_qp(qp, attr, attr_mask,
1622 attr->qp_state == IBV_QPS_RESET) {
1647 attr->qp_state == IBV_QPS_RTR &&
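
mlx5_modify_qp() also consults the cached link layer (1590) and does extra handling on transitions to RESET (1622) and to RTR (1647). A sketch of the usual first transition:

#include <infiniband/verbs.h>

/* RESET -> INIT with the mandatory attribute mask for that step. */
static int qp_to_init(struct ibv_qp *qp, uint8_t port)
{
        struct ibv_qp_attr attr = {
                .qp_state        = IBV_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = port,
                .qp_access_flags = 0,
        };

        return ibv_modify_qp(qp, &attr,
                             IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                             IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
}
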
1659 struct ibv_ah *mlx5_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
1669 if (attr->port_num < 1 || attr->port_num > ctx->num_ports)
1672 if (ctx->cached_link_layer[attr->port_num - 1]) {
1673 is_eth = ctx->cached_link_layer[attr->port_num - 1] ==
1676 if (ibv_query_port(pd->context, attr->port_num, &port_attr))
1682 if (unlikely((!attr->is_global) && is_eth)) {
1692 if (ibv_query_gid_type(pd->context, attr->port_num,
1693 attr->grh.sgid_index, &gid_type))
1705 ah->av.fl_mlid = attr->src_path_bits & 0x7f;
1706 ah->av.rlid = htobe16(attr->dlid);
1709 ah->av.stat_rate_sl = (attr->static_rate << 4) | attr->sl;
1710 if (attr->is_global) {
1711 ah->av.tclass = attr->grh.traffic_class;
1712 ah->av.hop_limit = attr->grh.hop_limit;
1714 ((attr->grh.sgid_index & 0xff) << 20) |
1715 (attr->grh.flow_label & 0xfffff));
1717 memcpy(ah->av.rgid, attr->grh.dgid.raw, 16);
1724 if (ibv_cmd_create_ah(pd, &ah->ibv_ah, attr, &resp.ibv_resp, sizeof(resp)))
1732 if (ibv_resolve_eth_l2_from_gid(pd->context, attr,
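
mlx5_create_ah() refuses non-global AHs on Ethernet ports (1682) and resolves L2 addresses from the GID when needed (1732). A caller sketch; `remote_lid` and `remote_gid` are hypothetical:

#include <infiniband/verbs.h>

static struct ibv_ah *make_ah(struct ibv_pd *pd, uint16_t remote_lid,
                              union ibv_gid *remote_gid)
{
        struct ibv_ah_attr attr = {
                .dlid      = remote_lid,
                .sl        = 0,
                .port_num  = 1,
                .is_global = 1,   /* mandatory on Ethernet ports, per 1682 */
                .grh = {
                        .dgid       = *remote_gid,
                        .sgid_index = 0,
                        .hop_limit  = 64,
                },
        };

        return ibv_create_ah(pd, &attr);
}
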
1770 struct ibv_qp_init_attr_ex *attr)
1772 return create_qp(context, attr);
1821 struct ibv_srq_init_attr_ex *attr)
1847 if (attr->attr.max_wr > ctx->max_srq_recv_wr) {
1849 __func__, __LINE__, attr->attr.max_wr,
1861 if (attr->attr.max_sge > max_sge) {
1863 __func__, __LINE__, attr->attr.max_sge,
1869 msrq->max = align_queue_size(attr->attr.max_wr + 1);
1870 msrq->max_gs = attr->attr.max_sge;
1892 attr->attr.max_sge = msrq->max_gs;
1906 attr, &cmd.ibv_cmd, sizeof(cmd),
1951 struct ibv_srq_init_attr_ex *attr)
1953 if (!(attr->comp_mask & IBV_SRQ_INIT_ATTR_TYPE) ||
1954 (attr->srq_type == IBV_SRQT_BASIC))
1955 return mlx5_create_srq(attr->pd,
1956 (struct ibv_srq_init_attr *)attr);
1957 else if (attr->srq_type == IBV_SRQT_XRC)
1958 return mlx5_create_xrc_srq(context, attr);
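
mlx5_create_srq_ex() routes basic SRQs back through mlx5_create_srq() (1955) and XRC SRQs to mlx5_create_xrc_srq() (1958). A sketch of the XRC case, with `xrcd` and `cq` assumed to exist:

#include <infiniband/verbs.h>

static struct ibv_srq *make_xrc_srq(struct ibv_context *ctx,
                                    struct ibv_pd *pd,
                                    struct ibv_xrcd *xrcd,
                                    struct ibv_cq *cq)
{
        struct ibv_srq_init_attr_ex attr = {
                .attr      = { .max_wr = 128, .max_sge = 1 },
                .comp_mask = IBV_SRQ_INIT_ATTR_TYPE |
                             IBV_SRQ_INIT_ATTR_PD |
                             IBV_SRQ_INIT_ATTR_XRCD |
                             IBV_SRQ_INIT_ATTR_CQ,
                .srq_type  = IBV_SRQT_XRC,
                .pd        = pd,
                .xrcd      = xrcd,
                .cq        = cq,
        };

        return ibv_create_srq_ex(ctx, &attr);
}
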
1965 struct ibv_device_attr_ex *attr,
1982 err = ibv_cmd_query_device_ex(context, input, attr, attr_size,
1990 attr->tso_caps = resp.tso_caps;
1991 attr->rss_caps.rx_hash_fields_mask = resp.rss_caps.rx_hash_fields_mask;
1992 attr->rss_caps.rx_hash_function = resp.rss_caps.rx_hash_function;
1993 attr->packet_pacing_caps = resp.packet_pacing_caps.caps;
2003 a = &attr->orig_attr;
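
The extended query surfaces the TSO, RSS and packet-pacing capabilities copied from the vendor response at 1990-1993. A minimal reader:

#include <stdio.h>
#include <infiniband/verbs.h>

static void print_ex_caps(struct ibv_context *ctx)
{
        struct ibv_device_attr_ex attr;

        if (ibv_query_device_ex(ctx, NULL, &attr))
                return;
        printf("max_tso %u, rx_hash_fields 0x%llx, pacing max %u kbps\n",
               attr.tso_caps.max_tso,
               (unsigned long long)attr.rss_caps.rx_hash_fields_mask,
               attr.packet_pacing_caps.qp_rate_limit_max);
}
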
2059 struct ibv_wq_init_attr *attr)
2070 if (attr->wq_type != IBV_WQT_RQ)
2088 ret = mlx5_calc_rwq_size(ctx, rwq, attr);
2122 err = ibv_cmd_create_wq(context, attr, &rwq->wq, &cmd.ibv_cmd,
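
Only receive WQs pass the wq_type check at 2070. Caller sketch, with `pd` and `cq` assumed:

#include <infiniband/verbs.h>

static struct ibv_wq *make_rq_wq(struct ibv_context *ctx,
                                 struct ibv_pd *pd, struct ibv_cq *cq)
{
        struct ibv_wq_init_attr attr = {
                .wq_type = IBV_WQT_RQ,
                .max_wr  = 256,
                .max_sge = 1,
                .pd      = pd,
                .cq      = cq,
        };

        return ibv_create_wq(ctx, &attr);
}
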
2151 int mlx5_modify_wq(struct ibv_wq *wq, struct ibv_wq_attr *attr)
2156 if ((attr->attr_mask & IBV_WQ_ATTR_STATE) &&
2157 attr->wq_state == IBV_WQS_RDY) {
2158 if ((attr->attr_mask & IBV_WQ_ATTR_CURR_STATE) &&
2159 attr->curr_wq_state != wq->state)
2173 return ibv_cmd_modify_wq(wq, attr, &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd));
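
And the state transition handled at 2156-2159, moving a new WQ to ready; curr_wq_state is only verified when IBV_WQ_ATTR_CURR_STATE is set:

#include <infiniband/verbs.h>

static int wq_to_ready(struct ibv_wq *wq)
{
        struct ibv_wq_attr attr = {
                .attr_mask = IBV_WQ_ATTR_STATE,
                .wq_state  = IBV_WQS_RDY,
        };

        return ibv_modify_wq(wq, &attr);
}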