Lines matching refs:ucmd (references to the identifier ucmd in the mlx4 IB driver, drivers/infiniband/hw/mlx4/qp.c; the leading number on each hit is that file's line number)

448 			    struct mlx4_ib_create_qp *ucmd)
453 if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
456 if (ucmd->log_sq_stride >
458 ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
461 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
462 qp->sq.wqe_shift = ucmd->log_sq_stride;
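
The first group of hits is set_user_sq_size(), which sanity-checks the send-queue geometry requested by userspace before committing it: check_shl_overflow() rejects a log_sq_bb_count whose shift would overflow, and log_sq_stride is bounded on both sides. A minimal sketch of the guard, with the mlx4 capability fields (dev->dev->caps.*) assumed from the surrounding source:

	u32 cnt;

	/* Reject a shift that overflows, or a count above the HW cap. */
	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > dev->dev->caps.max_wqes)
		return -EINVAL;

	/* The stride must fall inside the device's supported range. */
	if (ucmd->log_sq_stride >
	    ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
		return -EINVAL;

	/* Only validated values become the queue geometry. */
	qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
	qp->sq.wqe_shift = ucmd->log_sq_stride;
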
554 struct mlx4_ib_create_qp_rss *ucmd)
559 if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) &&
561 memcpy(rss_ctx->rss_key, ucmd->rx_hash_key,
568 if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
578 ucmd->rx_hash_fields_mask);
582 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
583 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
585 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) ||
586 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
591 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) &&
592 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
594 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) ||
595 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) {
600 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) &&
601 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
615 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) ||
616 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) {
621 if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
622 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
631 } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
632 (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
637 if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
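
The set_qp_rss() hits (554-637) enforce two rules on rx_hash_fields_mask: only known bits may be set (the ~(u64)(...) test at 568), and within each protocol the SRC and DST bits must come as a pair, so the lone-bit else-if arms reject the request. A condensed sketch of the pairing test; the helper name is hypothetical:

	/* Hypothetical helper: both bits set, or both clear - never one. */
	static bool rss_pair_ok(u64 mask, u64 src_bit, u64 dst_bit)
	{
		return !(mask & src_bit) == !(mask & dst_bit);
	}

	/* For IPv4; the real code repeats this for IPv6, UDP and TCP. */
	if (!rss_pair_ok(ucmd->rx_hash_fields_mask,
			 MLX4_IB_RX_HASH_SRC_IPV4, MLX4_IB_RX_HASH_DST_IPV4))
		return -EINVAL;
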
656 struct mlx4_ib_create_qp_rss *ucmd,
692 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd);
714 struct mlx4_ib_create_qp_rss ucmd = {};
726 required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
727 sizeof(ucmd.reserved1);
733 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
738 if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
741 if (ucmd.comp_mask || ucmd.reserved1)
744 if (udata->inlen > sizeof(ucmd) &&
745 !ib_is_udata_cleared(udata, sizeof(ucmd),
746 udata->inlen - sizeof(ucmd))) {
770 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
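
Hits 714-770 are the prologue of RSS QP creation and show the standard recipe for an extensible uverbs command: compute the minimum acceptable size with offsetof() through the last mandatory field, copy at most sizeof(ucmd) bytes, and demand that every reserved field and every byte beyond the known struct be zero, so new fields can be appended later without breaking older kernels. Assembled into one sketch (the error codes follow the pattern and may not match the source exactly):

	struct mlx4_ib_create_qp_rss ucmd = {};
	size_t required_cmd_sz = offsetof(typeof(ucmd), reserved1) +
				 sizeof(ucmd.reserved1);

	if (udata->inlen < required_cmd_sz)
		return -EINVAL;		/* too short for the mandatory fields */

	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		return -EOPNOTSUPP;	/* reserved bytes must be zero */

	if (ucmd.comp_mask || ucmd.reserved1)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EOPNOTSUPP;	/* unknown trailing bytes must be zero */
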
1071 struct mlx4_ib_create_qp ucmd;
1078 if (ib_copy_from_udata(&ucmd, udata, copy_len)) {
1083 qp->inl_recv_sz = ucmd.inl_recv_sz;
1101 qp->sq_no_prefetch = ucmd.sq_no_prefetch;
1103 err = set_user_sq_size(dev, qp, &ucmd);
1108 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
1129 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
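
Hits 1071-1129 are the userspace path of QP creation: the copied ucmd supplies the inline-receive size, the SQ no-prefetch flag and the SQ geometry (via set_user_sq_size() above), then the user buffer and doorbell record are mapped. A sketch of the mapping step, with unwinding abbreviated and qp->buf_size assumed to be computed earlier:

	/* Pin the user-provided QP buffer ... */
	qp->umem = ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0);
	if (IS_ERR(qp->umem))
		return PTR_ERR(qp->umem);

	/* ... and map the user's doorbell record. */
	err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db);
	if (err)
		goto err_umem;	/* the real code releases qp->umem on this path */
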
4134 struct mlx4_ib_create_wq ucmd;
4140 required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
4141 sizeof(ucmd.comp_mask);
4147 if (udata->inlen > sizeof(ucmd) &&
4148 !ib_is_udata_cleared(udata, sizeof(ucmd),
4149 udata->inlen - sizeof(ucmd))) {
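
mlx4_ib_create_wq() (hits 4134-4149) repeats the same prologue with comp_mask as its last mandatory field, i.e. required_cmd_sz = offsetof(typeof(ucmd), comp_mask) + sizeof(ucmd.comp_mask), followed by the identical trailing-bytes check; see the sketch after hit 770.
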
4272 struct mlx4_ib_modify_wq ucmd = {};
4277 required_cmd_sz = offsetof(typeof(ucmd), reserved) +
4278 sizeof(ucmd.reserved);
4282 if (udata->inlen > sizeof(ucmd) &&
4283 !ib_is_udata_cleared(udata, sizeof(ucmd),
4284 udata->inlen - sizeof(ucmd)))
4287 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
4290 if (ucmd.comp_mask || ucmd.reserved)
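
mlx4_ib_modify_wq() (hits 4272-4290) runs the same validation, checking the trailing bytes before copying. The "= {}" initializer is what makes the short-copy case safe: a caller passing an older, smaller struct leaves the uncopied tail zero-filled, which the final test then accepts. A compact sketch of just that interaction:

	struct mlx4_ib_modify_wq ucmd = {};	/* zero-filled on the stack */

	/* Copy only what userspace supplied; a copy shorter than
	 * sizeof(ucmd) leaves the remaining fields at zero. */
	if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
		return -EFAULT;

	/* Zero is the only accepted value, whether copied or defaulted. */
	if (ucmd.comp_mask || ucmd.reserved)
		return -EOPNOTSUPP;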