Lines Matching "+full:down +full:- +full:counters" (drivers/infiniband/hw/mlx5/counters.c)

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 */

#include "counters.h"

static int mlx5_ib_read_counters(struct ib_counters *counters,
				 struct ib_counters_read_attr *read_attr,
				 struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	struct mlx5_read_counters_attr mread_attr = {};
	struct mlx5_ib_flow_counters_desc *desc;
	int ret, i;

	mutex_lock(&mcounters->mcntrs_mutex);
	if (mcounters->cntrs_max_index > read_attr->ncounters) {
		ret = -EINVAL;
		goto err_bound;
	}

	mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
				 GFP_KERNEL);
	if (!mread_attr.out) {
		ret = -ENOMEM;
		goto err_bound;
	}

	mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
	mread_attr.flags = read_attr->flags;
	ret = mcounters->read_counters(counters->device, &mread_attr);
	if (ret)
		goto err_read;

	/* do the pass over the counters data array to assign according to
	 * the descriptions and indexing pairs
	 */
	desc = mcounters->counters_data;
	for (i = 0; i < mcounters->ncounters; i++)
		read_attr->counters_buff[desc[i].index] +=
			mread_attr.out[desc[i].description];

err_read:
	kfree(mread_attr.out);
err_bound:
	mutex_unlock(&mcounters->mcntrs_mutex);
	return ret;
}

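/*
 * Illustrative sketch (not part of counters.c): the loop above scatters raw
 * counter values into the caller's buffer through (description, index)
 * pairs, accumulating because several descriptors may target one slot.
 * Names below (struct cnt_desc, scatter_counters) are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct cnt_desc {
	uint32_t description;	/* index into the raw HW output array */
	uint32_t index;		/* slot in the user-visible buffer */
};

static void scatter_counters(const struct cnt_desc *desc, int ndesc,
			     const uint64_t *raw, uint64_t *out)
{
	for (int i = 0; i < ndesc; i++)
		out[desc[i].index] += raw[desc[i].description];
}

int main(void)
{
	/* two descriptors map both raw cells onto user slot 0 */
	struct cnt_desc desc[] = { {0, 0}, {1, 0} };
	uint64_t raw[] = { 5, 7 };
	uint64_t out[1] = { 0 };

	scatter_counters(desc, 2, raw, out);
	printf("%llu\n", (unsigned long long)out[0]); /* prints 12 */
	return 0;
}
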
static int mlx5_ib_destroy_counters(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mlx5_ib_counters_clear_description(counters);
	if (mcounters->hw_cntrs_hndl)
		mlx5_fc_destroy(to_mdev(counters->device)->mdev,
				mcounters->hw_cntrs_hndl);
	return 0;
}

static int mlx5_ib_create_counters(struct ib_counters *counters,
				   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);

	mutex_init(&mcounters->mcntrs_mutex);
	return 0;
}

static bool vport_qcounters_supported(struct mlx5_ib_dev *dev)
{
	return MLX5_CAP_GEN(dev->mdev, q_counter_other_vport) &&
	       MLX5_CAP_GEN(dev->mdev, q_counter_aggregation);
}

static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
						   u32 port_num)
{
	if ((is_mdev_switchdev_mode(dev->mdev) &&
	     !vport_qcounters_supported(dev)) || !port_num)
		return &dev->port[0].cnts;

	return is_mdev_switchdev_mode(dev->mdev) ?
	       &dev->port[1].cnts : &dev->port[port_num - 1].cnts;
}

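/*
 * Illustrative sketch (not part of counters.c): the selection logic above,
 * lifted into a self-contained helper that returns a port-array slot
 * instead of a pointer. The switchdev / vport-support flags become plain
 * booleans here; cnts_slot() is a hypothetical name.
 */
#include <assert.h>
#include <stdbool.h>

static unsigned int cnts_slot(bool switchdev, bool vport_qcnt,
			      unsigned int port_num)
{
	if ((switchdev && !vport_qcnt) || !port_num)
		return 0;			/* device-wide counter set */
	return switchdev ? 1 : port_num - 1;	/* shared vport slot or per-port */
}

int main(void)
{
	assert(cnts_slot(true, false, 2) == 0);	/* no per-vport counters */
	assert(cnts_slot(true, true, 2) == 1);	/* switchdev: shared slot 1 */
	assert(cnts_slot(false, false, 2) == 1);	/* native: port N maps to N-1 */
	return 0;
}
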
/**
 * mlx5_ib_get_counters_id - Returns counters id to use for device+port
 * @dev:	Pointer to mlx5 IB device
 * @port_num:	Zero based port number
 *
 * mlx5_ib_get_counters_id() Returns counters set id to use for given
 * device+port combination.
 */
u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
{
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num + 1);

	return cnts->set_id;
}

static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts)
{
	struct rdma_hw_stats *stats;
	int num_hw_counters;
	int i;

	num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
			  cnts->num_ext_ppcnt_counters;
	stats = rdma_alloc_hw_stats_struct(cnts->descs,
					   num_hw_counters +
					   cnts->num_op_counters,
					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
	if (!stats)
		return NULL;

	/* optional (op) counters start out disabled */
	for (i = 0; i < cnts->num_op_counters; i++)
		set_bit(num_hw_counters + i, stats->is_disabled);

	return stats;
}

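/*
 * Illustrative sketch (not part of counters.c): optional counters share the
 * stats array with the always-on HW counters but are masked out by default
 * through a disabled-bitmap, as set_bit() does above. Plain C bitmap;
 * names are hypothetical.
 */
#include <limits.h>
#include <stdio.h>

#define SKETCH_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void bitmap_set_bit(unsigned long *map, unsigned int nr)
{
	map[nr / SKETCH_BITS_PER_LONG] |= 1UL << (nr % SKETCH_BITS_PER_LONG);
}

static int bitmap_test_bit(const unsigned long *map, unsigned int nr)
{
	return !!(map[nr / SKETCH_BITS_PER_LONG] &
		  (1UL << (nr % SKETCH_BITS_PER_LONG)));
}

int main(void)
{
	unsigned long is_disabled[1] = { 0 };
	unsigned int num_hw = 5, num_op = 3;

	/* op counters occupy indexes [num_hw, num_hw + num_op) */
	for (unsigned int i = 0; i < num_op; i++)
		bitmap_set_bit(is_disabled, num_hw + i);

	for (unsigned int i = 0; i < num_hw + num_op; i++)
		printf("counter %u %s\n", i,
		       bitmap_test_bit(is_disabled, i) ? "disabled" : "enabled");
	return 0;
}
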
static struct rdma_hw_stats *
mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;

	return do_alloc_stats(cnts);
}

static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
				    const struct mlx5_ib_counters *cnts,
				    struct rdma_hw_stats *stats,
				    u16 set_id)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	__be32 val;
	int ret, i;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
	ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
	if (ret)
		return ret;

	for (i = 0; i < cnts->num_q_counters; i++) {
		val = *(__be32 *)((void *)out + cnts->offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}

	return 0;
}

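/*
 * Illustrative sketch (not part of counters.c): each q counter sits at a
 * known byte offset inside the firmware response, as a 32-bit big-endian
 * value that is widened to 64 bits. Self-contained equivalent using
 * arpa/inet.h; parse_q_counters() is a hypothetical name.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void parse_q_counters(const uint8_t *out, const size_t *offsets,
			     int n, uint64_t *value)
{
	for (int i = 0; i < n; i++) {
		uint32_t be;

		memcpy(&be, out + offsets[i], sizeof(be)); /* unaligned-safe */
		value[i] = ntohl(be);
	}
}

int main(void)
{
	uint8_t out[16] = { 0 };
	size_t offsets[2] = { 4, 12 };
	uint64_t value[2];
	uint32_t v0 = htonl(100), v1 = htonl(7);

	memcpy(out + 4, &v0, 4);
	memcpy(out + 12, &v1, 4);
	parse_q_counters(out, offsets, 2, value);
	printf("%llu %llu\n", (unsigned long long)value[0],
	       (unsigned long long)value[1]); /* prints: 100 7 */
	return 0;
}
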
static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
					    const struct mlx5_ib_counters *cnts,
					    struct rdma_hw_stats *stats)
{
	int offset = cnts->num_q_counters + cnts->num_cong_counters;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int ret, i;
	void *out;

	out = kvzalloc(sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	ret = mlx5_core_access_reg(dev->mdev, in, sz, out, sz, MLX5_REG_PPCNT,
				   0, 0);
	if (ret)
		goto free;

	for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
		stats->value[i + offset] =
			be64_to_cpup((__be64 *)(out +
				     cnts->offsets[i + offset]));
free:
	kvfree(out);
	return ret;
}

static int mlx5_ib_query_q_counters_vport(struct mlx5_ib_dev *dev,
					  u32 port_num,
					  const struct mlx5_ib_counters *cnts,
					  struct rdma_hw_stats *stats)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	struct mlx5_core_dev *mdev;
	__be32 val;
	int ret, i;

	if (!dev->port[port_num].rep ||
	    dev->port[port_num].rep->vport == MLX5_VPORT_UPLINK)
		return 0;

	mdev = mlx5_eswitch_get_core_dev(dev->port[port_num].rep->esw);
	if (!mdev)
		return -EOPNOTSUPP;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, other_vport, 1);
	MLX5_SET(query_q_counter_in, in, vport_number,
		 dev->port[port_num].rep->vport);
	MLX5_SET(query_q_counter_in, in, aggregate, 1);
	ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
	if (ret)
		return ret;

	for (i = 0; i < cnts->num_q_counters; i++) {
		val = *(__be32 *)((void *)out + cnts->offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}

	return 0;
}

static int do_get_hw_stats(struct ib_device *ibdev,
			   struct rdma_hw_stats *stats,
			   u32 port_num, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
	struct mlx5_core_dev *mdev;
	int ret, num_counters;

	if (!stats)
		return -EINVAL;

	num_counters = cnts->num_q_counters +
		       cnts->num_cong_counters +
		       cnts->num_ext_ppcnt_counters;

	if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
		ret = mlx5_ib_query_q_counters_vport(dev, port_num - 1, cnts,
						     stats);
	else
		ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats,
					       cnts->set_id);
	if (ret)
		return ret;

	/* We don't expose device counters over Vports */
	if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0)
		goto done;

	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
		if (ret)
			return ret;
	}

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
		if (!mdev) {
			/* If the port is not affiliated yet, it's in down
			 * state, which doesn't have any counters yet, so
			 * they would read as zero. No need to query the HCA.
			 */
			goto done;
		}
		ret = mlx5_lag_query_cong_counters(dev->mdev,
						   stats->value +
						   cnts->num_q_counters,
						   cnts->num_cong_counters,
						   cnts->offsets +
						   cnts->num_q_counters);
		mlx5_ib_put_native_port_mdev(dev, port_num);
		if (ret)
			return ret;
	}

done:
	return num_counters;
}

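/*
 * Illustrative sketch (not part of counters.c): the stats buffer is one
 * flat array with the counter groups stacked back to back, so each query
 * helper writes at its group's base offset. group_base() and struct
 * layout are hypothetical.
 */
#include <stdio.h>

struct layout {
	int num_q, num_cong, num_ppcnt;
};

static int group_base(const struct layout *l, int group)
{
	switch (group) {
	case 0: return 0;			/* q counters */
	case 1: return l->num_q;		/* congestion counters */
	case 2: return l->num_q + l->num_cong;	/* extended ppcnt counters */
	default: return -1;
	}
}

int main(void)
{
	struct layout l = { .num_q = 10, .num_cong = 4, .num_ppcnt = 3 };

	printf("q@%d cong@%d ppcnt@%d total=%d\n",
	       group_base(&l, 0), group_base(&l, 1), group_base(&l, 2),
	       l.num_q + l.num_cong + l.num_ppcnt);
	return 0;
}
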
static int do_per_qp_get_op_stat(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
	struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
	int i, ret, index, num_hw_counters;
	u64 packets, bytes;

	for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
	     i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
		if (!mcounter->fc[i])
			continue;

		ret = mlx5_fc_query(dev->mdev, mcounter->fc[i],
				    &packets, &bytes);
		if (ret)
			return ret;

		num_hw_counters = cnts->num_q_counters +
				  cnts->num_cong_counters +
				  cnts->num_ext_ppcnt_counters;

		index = i - MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP +
			num_hw_counters;

		/* bytes for the *_BYTES_PER_QP counters, packets otherwise */
		if (i == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES_PER_QP ||
		    i == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP)
			counter->stats->value[index] = bytes;
		else
			counter->stats->value[index] = packets;

		clear_bit(index, counter->stats->is_disabled);
	}

	return 0;
}

static int do_get_op_stat(struct ib_device *ibdev,
			  struct rdma_hw_stats *stats,
			  u32 port_num, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
	const struct mlx5_ib_op_fc *opfcs;
	u64 packets, bytes;
	u32 type;
	int ret;

	opfcs = cnts->opfcs;
	type = *(u32 *)cnts->descs[index].priv;
	if (WARN_ON(type >= MLX5_IB_OPCOUNTER_MAX))
		return -EINVAL;

	if (!opfcs[type].fc)
		goto out;

	ret = mlx5_fc_query(dev->mdev, opfcs[type].fc,
			    &packets, &bytes);
	if (ret)
		return ret;

	/* bytes-type op counters report bytes, the rest report packets */
	if (type == MLX5_IB_OPCOUNTER_RDMA_TX_BYTES ||
	    type == MLX5_IB_OPCOUNTER_RDMA_RX_BYTES)
		stats->value[index] = bytes;
	else
		stats->value[index] = packets;

out:
	return 1;
}

static int do_get_op_stats(struct ib_device *ibdev,
			   struct rdma_hw_stats *stats,
			   u32 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts;
	int index, ret, num_hw_counters;

	cnts = get_counters(dev, port_num);
	num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
			  cnts->num_ext_ppcnt_counters;
	for (index = num_hw_counters;
	     index < (num_hw_counters + cnts->num_op_counters); index++) {
		ret = do_get_op_stat(ibdev, stats, port_num, index);
		if (ret != 1)
			return ret;
	}

	return cnts->num_op_counters;
}

static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u32 port_num, int index)
{
	int num_counters = 0, num_hw_counters = 0;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct mlx5_ib_counters *cnts;

	cnts = get_counters(dev, port_num);
	num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
			  cnts->num_ext_ppcnt_counters;
	num_counters = num_hw_counters + cnts->num_op_counters;

	if (index < 0 || index > num_counters)
		return -EINVAL;
	/* ... dispatch to do_get_hw_stats() and/or do_get_op_stats() ... */
}

static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);

	return do_alloc_stats(cnts);
}

static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port);
	int ret;

	ret = mlx5_ib_query_q_counters(dev->mdev, cnts, counter->stats,
				       counter->id);
	if (ret)
		return ret;

	if (!counter->mode.bind_opcnt)
		return 0;

	return do_per_qp_get_op_stat(counter);
}

static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
{
	struct mlx5_ib_dev *dev = to_mdev(counter->device);
	struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};

	if (!counter->id)
		return 0;

	WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
	return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
}

static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
				   struct ib_qp *qp, u32 port)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	bool new = false;
	int err;

	if (!counter->id) {
		u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
		u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};

		MLX5_SET(alloc_q_counter_in, in, opcode,
			 MLX5_CMD_OP_ALLOC_Q_COUNTER);
		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		if (err)
			return err;
		counter->id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
		new = true;
	}

	err = mlx5_ib_qp_set_counter(qp, counter);
	if (err)
		goto fail_set_counter;
	/* ... bind the per-QP op flow counters as well ... */
	return 0;

fail_set_counter:
	if (new) {
		mlx5_ib_counter_dealloc(counter);
		counter->id = 0;
	}
	return err;
}

static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
	struct rdma_counter *counter = qp->counter;
	/* ... detach the QP and its per-QP op flow counters ... */
}

static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
				  struct rdma_stat_desc *descs,
				  size_t *offsets, u32 port_num)
{
	bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
			port_num != MLX5_VPORT_PF;

	/* ... the basic q counters (or their vport variants) come first;
	 * each capability below appends its own group of names/offsets
	 */
	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
		/* ... out-of-sequence q counters ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		/* ... retransmission q counters ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
		/* ... enhanced-error q counters ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
		/* ... RoCE acceleration q counters ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		/* ... congestion counters ... */
	}
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		/* ... extended ppcnt counters ... */
	}
	if (MLX5_CAP_FLOWTABLE(dev->mdev,
			       ft_field_support_2_nic_receive_rdma.bth_opcode)) {
		/* ... RX CNP optional (op) counters ... */
	}
	if (MLX5_CAP_FLOWTABLE(dev->mdev,
			       ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
		/* ... TX CNP optional (op) counters ... */
	}
	/* ... */
}

static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_counters *cnts,
				    u32 port_num)
{
	bool is_vport = is_mdev_switchdev_mode(dev->mdev) &&
			port_num != MLX5_VPORT_PF;
	u32 num_counters, num_op_counters = 0;

	/* start from the basic q counters (or their vport variants), then
	 * grow the count per capability
	 */
	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
		/* ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		/* ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
		/* ... */
	}
	if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
		/* ... */
	}
	cnts->num_q_counters = num_counters;

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
		num_counters += ARRAY_SIZE(cong_cnts);
	}
	if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
		cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
		num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
	}

	if (MLX5_CAP_FLOWTABLE(dev->mdev,
			       ft_field_support_2_nic_receive_rdma.bth_opcode)) {
		/* ... RX CNP op counters ... */
	}
	if (MLX5_CAP_FLOWTABLE(dev->mdev,
			       ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
		/* ... TX CNP op counters ... */
	}
	cnts->num_op_counters = num_op_counters;

	cnts->descs = kcalloc(num_counters,
			      sizeof(struct rdma_stat_desc), GFP_KERNEL);
	if (!cnts->descs)
		return -ENOMEM;

	cnts->offsets = kcalloc(num_counters,
				sizeof(*cnts->offsets), GFP_KERNEL);
	if (!cnts->offsets)
		goto err;

	return 0;

err:
	kfree(cnts->descs);
	cnts->descs = NULL;
	return -ENOMEM;
}

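/*
 * Illustrative sketch (not part of counters.c): the paired-allocation
 * pattern above, with a kfree()-and-NULL unwind path so a later cleanup
 * can run safely. alloc_tables() and struct tables are hypothetical.
 */
#include <stdlib.h>

struct tables {
	void *descs;
	size_t *offsets;
};

int alloc_tables(struct tables *t, size_t n)
{
	t->descs = calloc(n, 64);	/* stand-in element size */
	if (!t->descs)
		return -1;

	t->offsets = calloc(n, sizeof(*t->offsets));
	if (!t->offsets)
		goto err;

	return 0;

err:
	free(t->descs);
	t->descs = NULL;	/* leave the struct in a safe, re-freeable state */
	return -1;
}
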
static bool mlx5r_is_opfc_shared_and_in_use(struct mlx5_ib_op_fc *opfcs,
					    u32 type,
					    struct mlx5_ib_op_fc **opfc)
{
	/* ... map 'type' to the slot that shares its HW flow counter ... */
	if (!(*opfc)->fc)
		return false;

	return true;
}

static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
	int num_cnt_ports = dev->num_ports;
	struct mlx5_ib_op_fc *in_use_opfc;
	int i, j;

	if (is_mdev_switchdev_mode(dev->mdev))
		num_cnt_ports = min(2, num_cnt_ports);

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);

	for (i = 0; i < num_cnt_ports; i++) {
		if (dev->port[i].cnts.set_id) {
			MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
				 dev->port[i].cnts.set_id);
			mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
		}
		kfree(dev->port[i].cnts.descs);
		kfree(dev->port[i].cnts.offsets);

		for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) {
			if (!dev->port[i].cnts.opfcs[j].fc)
				continue;

			if (mlx5r_is_opfc_shared_and_in_use(
				    dev->port[i].cnts.opfcs, j, &in_use_opfc))
				goto skip;

			mlx5_ib_fs_remove_op_fc(dev,
						&dev->port[i].cnts.opfcs[j], j);
			mlx5_fc_destroy(dev->mdev,
					dev->port[i].cnts.opfcs[j].fc);
skip:
			dev->port[i].cnts.opfcs[j].fc = NULL;
		}
	}
}

static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
	int num_cnt_ports = dev->num_ports;
	bool is_shared;
	int err = 0;
	int i;

	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
	is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;

	/* In switchdev mode only two counter sets are needed: one for the
	 * device Q counters and one shared by the vports.
	 */
	if (is_mdev_switchdev_mode(dev->mdev))
		num_cnt_ports = min(2, num_cnt_ports);

	for (i = 0; i < num_cnt_ports; i++) {
		err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts, i);
		if (err)
			goto err_alloc;

		mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
				      dev->port[i].cnts.offsets, i);
		/* ... */
		err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
		if (err)
			goto err_alloc;

		dev->port[i].cnts.set_id =
			MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	}
	return 0;

err_alloc:
	mlx5_ib_dealloc_counters(dev);
	return err;
}

static int read_flow_counters(struct ib_device *ibdev,
			      struct mlx5_read_counters_attr *read_attr)
{
	struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_fc_query(dev->mdev, fc,
			     &read_attr->out[IB_COUNTER_PACKETS],
			     &read_attr->out[IB_COUNTER_BYTES]);
}

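/*
 * Illustrative sketch (not part of counters.c): a flow counter read always
 * produces the (packets, bytes) pair, written into fixed slots of the raw
 * output array that the user descriptors then refer to. Names below are
 * hypothetical.
 */
#include <stdint.h>

enum { COUNTER_PACKETS, COUNTER_BYTES, FLOW_COUNTERS_NUM };

struct hw_flow_counter {
	uint64_t packets, bytes;
};

void read_flow_counter(const struct hw_flow_counter *fc, uint64_t *out)
{
	out[COUNTER_PACKETS] = fc->packets;
	out[COUNTER_BYTES] = fc->bytes;
}
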
/* flow counters currently expose two counters: packets and bytes */
#define FLOW_COUNTERS_NUM 2

static int counters_set_description(
	struct ib_counters *counters, enum mlx5_ib_counters_type counters_type,
	struct mlx5_ib_flow_counters_desc *desc_data, u32 ncounters)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
	u32 cntrs_max_index = 0;
	int i;

	if (counters_type != MLX5_IB_COUNTERS_FLOW)
		return -EINVAL;

	/* init the fields for the object */
	mcounters->type = counters_type;
	mcounters->read_counters = read_flow_counters;
	mcounters->counters_num = FLOW_COUNTERS_NUM;
	mcounters->ncounters = ncounters;

	/* each counter entry carries a (description, index) pair */
	for (i = 0; i < ncounters; i++) {
		if (desc_data[i].description > IB_COUNTER_BYTES)
			return -EINVAL;

		if (cntrs_max_index <= desc_data[i].index)
			cntrs_max_index = desc_data[i].index + 1;
	}

	mutex_lock(&mcounters->mcntrs_mutex);
	mcounters->counters_data = desc_data;
	mcounters->cntrs_max_index = cntrs_max_index;
	mutex_unlock(&mcounters->mcntrs_mutex);

	return 0;
}

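/*
 * Illustrative sketch (not part of counters.c): validating (description,
 * index) pairs and recording the highest user index plus one, which later
 * bounds read requests. Hypothetical names; a pthread mutex stands in for
 * the kernel mutex.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>

enum { COUNTER_PACKETS, COUNTER_BYTES };

struct flow_desc {
	uint32_t description;
	uint32_t index;
};

struct flow_counters {
	pthread_mutex_t lock;
	struct flow_desc *data;
	uint32_t max_index;
};

int set_description(struct flow_counters *fc,
		    struct flow_desc *desc, uint32_t n)
{
	uint32_t max_index = 0;

	for (uint32_t i = 0; i < n; i++) {
		if (desc[i].description > COUNTER_BYTES)
			return -EINVAL;	/* unknown source counter */
		if (max_index <= desc[i].index)
			max_index = desc[i].index + 1;
	}

	/* publish the validated mapping atomically w.r.t. readers */
	pthread_mutex_lock(&fc->lock);
	fc->data = desc;
	fc->max_index = max_index;
	pthread_mutex_unlock(&fc->lock);
	return 0;
}
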
int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
				   struct mlx5_ib_create_flow *ucmd)
{
	struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
	struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
	struct mlx5_ib_flow_counters_desc *desc_data = NULL;
	bool hw_hndl = false;
	int ret = 0;

	if (ucmd && ucmd->ncounters_data != 0) {
		cntrs_data = ucmd->data;
		if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
			return -EINVAL;

		desc_data = kcalloc(cntrs_data->ncounters,
				    sizeof(*desc_data), GFP_KERNEL);
		if (!desc_data)
			return -ENOMEM;

		if (copy_from_user(desc_data,
				   u64_to_user_ptr(cntrs_data->counters_data),
				   sizeof(*desc_data) * cntrs_data->ncounters)) {
			ret = -EFAULT;
			goto free;
		}
	}

	if (!mcounters->hw_cntrs_hndl) {
		mcounters->hw_cntrs_hndl = mlx5_fc_create(
			to_mdev(ibcounters->device)->mdev, false);
		if (IS_ERR(mcounters->hw_cntrs_hndl)) {
			ret = PTR_ERR(mcounters->hw_cntrs_hndl);
			goto free;
		}
		hw_hndl = true;
	}

	if (desc_data) {
		/* counters already bound to at least one flow */
		if (mcounters->cntrs_max_index) {
			ret = -EINVAL;
			goto free_hndl;
		}

		ret = counters_set_description(ibcounters,
					       MLX5_IB_COUNTERS_FLOW,
					       desc_data,
					       cntrs_data->ncounters);
		if (ret)
			goto free_hndl;
	} else if (!mcounters->cntrs_max_index) {
		/* counters not bound yet, must have udata passed */
		ret = -EINVAL;
		goto free_hndl;
	}

	return 0;

free_hndl:
	if (hw_hndl) {
		mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
				mcounters->hw_cntrs_hndl);
		mcounters->hw_cntrs_hndl = NULL;
	}
free:
	kfree(desc_data);
	return ret;
}

void mlx5_ib_counters_clear_description(struct ib_counters *counters)
{
	struct mlx5_ib_mcounters *mcounters;

	if (!counters || atomic_read(&counters->usecnt) != 1)
		return;

	mcounters = to_mcounters(counters);

	mutex_lock(&mcounters->mcntrs_mutex);
	kfree(mcounters->counters_data);
	mcounters->counters_data = NULL;
	mcounters->cntrs_max_index = 0;
	mutex_unlock(&mcounters->mcntrs_mutex);
}

static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
			       unsigned int index, bool enable)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct mlx5_ib_op_fc *opfc, *in_use_opfc;
	struct mlx5_ib_counters *cnts;
	u32 num_hw_counters, type;
	int ret;

	cnts = &dev->port[port - 1].cnts;
	num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
			  cnts->num_ext_ppcnt_counters;
	if (index < num_hw_counters ||
	    index >= (num_hw_counters + cnts->num_op_counters))
		return -EINVAL;

	if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
		return -EINVAL;

	type = *(u32 *)cnts->descs[index].priv;
	if (type >= MLX5_IB_OPCOUNTER_MAX)
		return -EINVAL;

	opfc = &cnts->opfcs[type];

	if (enable) {
		if (opfc->fc)
			return -EEXIST;

		if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type,
						    &in_use_opfc)) {
			opfc->fc = in_use_opfc->fc;
			opfc->rule[0] = in_use_opfc->rule[0];
			return 0;
		}

		opfc->fc = mlx5_fc_create(dev->mdev, false);
		if (IS_ERR(opfc->fc))
			return PTR_ERR(opfc->fc);

		ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type);
		if (ret) {
			mlx5_fc_destroy(dev->mdev, opfc->fc);
			opfc->fc = NULL;
		}
		return ret;
	}

	if (!opfc->fc)
		return -EINVAL;

	if (mlx5r_is_opfc_shared_and_in_use(cnts->opfcs, type, &in_use_opfc))
		goto out;

	mlx5_ib_fs_remove_op_fc(dev, opfc, type);
	mlx5_fc_destroy(dev->mdev, opfc->fc);
out:
	opfc->fc = NULL;
	return 0;
}

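/*
 * Illustrative sketch (not part of counters.c): two optional counters can
 * share one HW flow counter. Enable reuses a sibling's counter when it is
 * already live; disable only frees the counter once no sibling still
 * points at it. Names and the sibling map are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct op_fc {
	void *fc;	/* stand-in for the HW flow counter handle */
};

/* sibling map: slot 0 shares with 1, slot 2 with 3, and so on */
static struct op_fc *shared_in_use(struct op_fc *opfcs, int type)
{
	struct op_fc *buddy = &opfcs[type ^ 1];

	return buddy->fc ? buddy : NULL;
}

static int op_fc_enable(struct op_fc *opfcs, int type)
{
	struct op_fc *in_use = shared_in_use(opfcs, type);

	if (opfcs[type].fc)
		return -1;	/* already enabled */
	opfcs[type].fc = in_use ? in_use->fc : malloc(1);
	return opfcs[type].fc ? 0 : -1;
}

static void op_fc_disable(struct op_fc *opfcs, int type)
{
	if (!opfcs[type].fc)
		return;
	if (!shared_in_use(opfcs, type))
		free(opfcs[type].fc);	/* last user releases the HW counter */
	opfcs[type].fc = NULL;
}

int main(void)
{
	struct op_fc opfcs[2] = { { NULL }, { NULL } };

	assert(op_fc_enable(opfcs, 0) == 0);
	assert(op_fc_enable(opfcs, 1) == 0);	/* reuses slot 0's counter */
	assert(opfcs[0].fc == opfcs[1].fc);
	op_fc_disable(opfcs, 0);		/* still shared: not freed */
	op_fc_disable(opfcs, 1);		/* last user: freed */
	return 0;
}
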
static int mlx5_ib_counter_init(struct rdma_counter *counter)
{
	struct mlx5_rdma_counter *mcounter = to_mcounter(counter);

	xa_init(&mcounter->qpn_opfc_xa);

	return 0;
}

void mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &counters_ops);

	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return;

	if (is_mdev_switchdev_mode(dev->mdev)) {
		ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
		if (vport_qcounters_supported(dev))
			ib_set_device_ops(&dev->ib_dev, &hw_switchdev_vport_op);
	} else {
		ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
	}
}

void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		return;

	mlx5_ib_dealloc_counters(dev);
}