Searched refs: MLX5_CAP_GEN (Results 1 – 25 of 89), sorted by relevance
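
For context: MLX5_CAP_GEN(dev, field) reads a named field from the device's cached general HCA capabilities, and the hits below mostly follow two patterns: gating a code path on a boolean capability bit, or deriving a limit from a log2-encoded field (e.g. 1 << MLX5_CAP_GEN(dev, log_max_qp_sz)). Below is a minimal, self-contained C sketch of those two patterns; the struct, macro, and field names are hypothetical stand-ins, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the device's cached general HCA capabilities. */
struct hca_caps {
	bool relaxed_ordering_write;	/* boolean feature bit */
	unsigned int log_max_qp_sz;	/* log2-encoded limit */
};

/* Hypothetical stand-in for MLX5_CAP_GEN(dev, field): read one capability field. */
#define CAP_GEN(caps, field) ((caps)->field)

int main(void)
{
	struct hca_caps caps = {
		.relaxed_ordering_write = true,
		.log_max_qp_sz = 15,
	};

	/* Pattern 1: gate a code path on a capability bit. */
	if (CAP_GEN(&caps, relaxed_ordering_write))
		printf("relaxed ordering writes supported\n");

	/* Pattern 2: derive a numeric limit from a log2-encoded capability. */
	unsigned int max_qp_sz = 1u << CAP_GEN(&caps, log_max_qp_sz);
	printf("max QP size: %u\n", max_qp_sz);

	return 0;
}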

/linux/drivers/infiniband/hw/mlx5/
umr.h
33 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) in mlx5r_umr_can_load_pas()
40 if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) && in mlx5r_umr_can_load_pas()
58 MLX5_CAP_GEN(dev->mdev, atomic) && in mlx5r_umr_can_reconfig()
59 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in mlx5r_umr_can_reconfig()
63 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && in mlx5r_umr_can_reconfig()
64 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in mlx5r_umr_can_reconfig()
68 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in mlx5r_umr_can_reconfig()
69 MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_pci_enabled)) && in mlx5r_umr_can_reconfig()
70 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in mlx5r_umr_can_reconfig()
main.c
110 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); in mlx5_ib_port_link_layer()
573 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg); in mlx5_query_port_roce()
691 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) in mlx5_use_mad_ifc()
692 return !MLX5_CAP_GEN(dev->mdev, ib_virt); in mlx5_use_mad_ifc()
788 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, in mlx5_query_max_pkeys()
883 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); in mlx5_ib_query_device()
918 if (MLX5_CAP_GEN(mdev, pkv)) in mlx5_ib_query_device()
920 if (MLX5_CAP_GEN(mdev, qkv)) in mlx5_ib_query_device()
922 if (MLX5_CAP_GEN(mdev, apm)) in mlx5_ib_query_device()
924 if (MLX5_CAP_GEN(mdev, xrc)) in mlx5_ib_query_device()
[all …]
counters.c
207 return MLX5_CAP_GEN(dev->mdev, q_counter_other_vport) && in vport_qcounters_supported()
208 MLX5_CAP_GEN(dev->mdev, q_counter_aggregation); in vport_qcounters_supported()
403 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { in do_get_hw_stats()
607 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { in mlx5_ib_fill_counters()
617 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { in mlx5_ib_fill_counters()
627 if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { in mlx5_ib_fill_counters()
637 if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { in mlx5_ib_fill_counters()
647 if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { in mlx5_ib_fill_counters()
700 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) in __mlx5_ib_alloc_counters()
705 if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) in __mlx5_ib_alloc_counters()
[all …]
qp.c
326 if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) || in mlx5_ib_qp_err_syndrome()
327 !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome)) in mlx5_ib_qp_err_syndrome()
440 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) in set_rq_size()
475 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { in set_rq_size()
478 MLX5_CAP_GEN(dev->mdev, in set_rq_size()
605 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in calc_sq_size()
607 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); in calc_sq_size()
617 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
621 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); in calc_sq_size()
644 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in set_user_buf_size()
[all …]
umr.c
55 if (MLX5_CAP_GEN(dev->mdev, atomic)) in get_umr_update_access_mask()
58 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in get_umr_update_access_mask()
61 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in get_umr_update_access_mask()
79 MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) in umr_check_mkey_mask()
83 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in umr_check_mkey_mask()
87 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in umr_check_mkey_mask()
91 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in umr_check_mkey_mask()
445 (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) || in mlx5r_umr_set_access_flags()
779 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); in umr_can_use_indirect_mkey()
cmd.c
15 is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey); in mlx5r_cmd_query_special_mkeys()
16 is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey); in mlx5r_cmd_query_special_mkeys()
17 is_null = MLX5_CAP_GEN(dev->mdev, null_mkey); in mlx5r_cmd_query_special_mkeys()
259 MLX5_SET(query_vuid_in, in, vhca_id, MLX5_CAP_GEN(dev, vhca_id)); in mlx5_cmd_query_vuid()
srq.c
93 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 && in create_srq_user()
154 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 && in create_srq_kernel()
201 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); in mlx5_ib_create_srq()
202 __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) / in mlx5_ib_create_srq()
272 MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) { in mlx5_ib_create_srq()
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
sf.h
11 return MLX5_CAP_GEN(dev, sf_base_id); in mlx5_sf_start_function_id()
18 return MLX5_CAP_GEN(dev, sf); in mlx5_sf_supported()
25 if (MLX5_CAP_GEN(dev, max_num_sf)) in mlx5_sf_max_functions()
26 return MLX5_CAP_GEN(dev, max_num_sf); in mlx5_sf_max_functions()
28 return 1 << MLX5_CAP_GEN(dev, log_max_sf); in mlx5_sf_max_functions()
sd.c
108 if (!MLX5_CAP_GEN(dev, eswitch_manager)) in mlx5_sd_is_supported()
110 if (!MLX5_CAP_GEN(dev, silent_mode)) in mlx5_sd_is_supported()
114 if (!MLX5_CAP_GEN(dev, cross_vhca_rqt)) in mlx5_sd_is_supported()
146 return (u32)((MLX5_CAP_GEN(dev, native_port_num) << 8) | sd_group); in mlx5_sd_group_id()
322 u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(primary, vhca_id); in sd_secondary_create_alias_ft()
393 MLX5_CAP_GEN(primary, vhca_id)); in sd_print_group()
397 MLX5_CAP_GEN(pos, vhca_id)); in sd_print_group()
409 MLX5_CAP_GEN(dev, vhca_id)); in dev_read()
clock.h
38 u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format); in mlx5_is_real_time_rq()
47 u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format); in mlx5_is_real_time_sq()
/linux/drivers/net/ethernet/mellanox/mlx5/core/diag/
reporter_vnic.c
32 if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) { in mlx5_reporter_vnic_diagnose_counters()
38 if (MLX5_CAP_GEN(dev, eq_overrun_count)) { in mlx5_reporter_vnic_diagnose_counters()
44 if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) in mlx5_reporter_vnic_diagnose_counters()
47 if (MLX5_CAP_GEN(dev, invalid_command_count)) in mlx5_reporter_vnic_diagnose_counters()
50 if (MLX5_CAP_GEN(dev, quota_exceeded_count)) in mlx5_reporter_vnic_diagnose_counters()
53 if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) in mlx5_reporter_vnic_diagnose_counters()
56 if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) { in mlx5_reporter_vnic_diagnose_counters()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
monitor_stats.c
25 if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters)) in mlx5e_monitor_counter_cap()
28 MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) < in mlx5e_monitor_counter_cap()
31 if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) < in mlx5e_monitor_counter_cap()
114 int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters); in mlx5e_set_monitor_counter()
115 int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters); in mlx5e_set_monitor_counter()
117 MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters); in mlx5e_set_monitor_counter()
channels.c
34 *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id); in mlx5e_channels_get_regular_rqn()
46 *vhca_id = MLX5_CAP_GEN(c->mdev, vhca_id); in mlx5e_channels_get_xsk_rqn()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
vport.c
271 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : in mlx5_query_nic_vport_mac_list()
272 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); in mlx5_query_nic_vport_mac_list()
330 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) : in mlx5_modify_nic_vport_mac_list()
331 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list); in mlx5_modify_nic_vport_mac_list()
381 max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list); in mlx5_modify_nic_vport_vlans()
492 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) in mlx5_modify_nic_vport_node_guid()
552 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); in mlx5_query_hca_vport_gid()
553 tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size)); in mlx5_query_hca_vport_gid()
586 if (MLX5_CAP_GEN(dev, num_ports) == 2) in mlx5_query_hca_vport_gid()
619 is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager); in mlx5_query_hca_vport_pkey()
[all …]
en_dcbnl.c
60 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
92 if (!MLX5_CAP_GEN(priv->mdev, dcbx)) in mlx5e_dcbnl_switch_to_host_mode()
117 if (!MLX5_CAP_GEN(priv->mdev, ets)) in mlx5e_dcbnl_ieee_getets()
328 if (!MLX5_CAP_GEN(priv->mdev, ets)) in mlx5e_dcbnl_ieee_setets()
424 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) { in mlx5e_dcbnl_setdcbx()
456 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || in mlx5e_dcbnl_ieee_setapp()
509 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) || in mlx5e_dcbnl_ieee_delapp()
631 if (!MLX5_CAP_GEN(mdev, ets)) in mlx5e_dcbnl_setall()
740 if (!MLX5_CAP_GEN(priv->mdev, ets)) { in mlx5e_dcbnl_getpgtccfgtx()
1028 if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) in mlx5e_dcbnl_build_netdev()
[all …]
uar.c
63 if (MLX5_CAP_GEN(mdev, uar_4k)) in uars_per_sys_page()
64 return MLX5_CAP_GEN(mdev, num_of_uars_per_page); in uars_per_sys_page()
73 if (MLX5_CAP_GEN(mdev, uar_4k)) in uar2pfn()
201 (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET; in map_offset()
281 bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size); in addr_to_dbi_in_syspage()
en_common.c
42 bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); in mlx5e_mkey_set_relaxed_ordering()
43 bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read) || in mlx5e_mkey_set_relaxed_ordering()
45 MLX5_CAP_GEN(mdev, relaxed_ordering_read_pci_enabled)); in mlx5e_mkey_set_relaxed_ordering()
105 return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1; in mlx5_lag_should_assign_affinity()
devlink.c
413 if (new_state && !MLX5_CAP_GEN(dev, roce) && in mlx5_devlink_enable_roce_validate()
414 !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) { in mlx5_devlink_enable_roce_validate()
471 if (val32 > BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))) { in mlx5_devlink_hairpin_queue_size_validate()
474 BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))); in mlx5_devlink_hairpin_queue_size_validate()
500 MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))); in mlx5_devlink_hairpin_params_init_values()
526 value.vbool = MLX5_CAP_GEN(dev, roce) && !mlx5_dev_is_lightweight(dev); in mlx5_devlink_set_params_init_values()
756 value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list); in mlx5_devlink_max_uc_list_params_register()
en_ethtool.c
554 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) { in mlx5e_ethtool_get_coalesce()
591 if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) in mlx5e_ethtool_get_per_queue_coalesce()
702 if (!MLX5_CAP_GEN(mdev, cq_moderation) || in mlx5e_ethtool_set_coalesce()
703 !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) { in mlx5e_ethtool_set_coalesce()
729 !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) { in mlx5e_ethtool_set_coalesce()
822 if (!MLX5_CAP_GEN(mdev, cq_moderation)) in mlx5e_ethtool_set_per_queue_coalesce()
1641 if (!MLX5_CAP_GEN(mdev, vport_group_manager)) in mlx5e_ethtool_set_pauseparam()
1673 if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) || in mlx5e_ethtool_get_ts_info()
1702 if (MLX5_CAP_GEN(mdev, wol_g)) in mlx5e_get_wol_supported()
1705 if (MLX5_CAP_GEN(mdev, wol_s)) in mlx5e_get_wol_supported()
[all …]
pci_irq.c
69 min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); in mlx5_get_default_msix_vec_count()
70 max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); in mlx5_get_default_msix_vec_count()
101 if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev)) in mlx5_set_msix_vec_count()
104 min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size); in mlx5_set_msix_vec_count()
105 max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size); in mlx5_set_msix_vec_count()
749 pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; in mlx5_irq_table_create()
wc.c
120 if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128) in mlx5_wc_create_cq()
260 int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2; in mlx5_wc_post_nop()
388 if (!MLX5_CAP_GEN(mdev, bf)) { in mlx5_wc_support_get()
393 if (!MLX5_CAP_GEN(mdev, log_max_sq)) { in mlx5_wc_support_get()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ktls.h
26 if (!MLX5_CAP_GEN(mdev, tls_tx) && !MLX5_CAP_GEN(mdev, tls_rx)) in mlx5e_is_ktls_device()
29 if (!MLX5_CAP_GEN(mdev, log_max_dek)) in mlx5e_is_ktls_device()
65 return !is_kdump_kernel() && MLX5_CAP_GEN(mdev, tls_tx) && in mlx5e_is_ktls_tx()
/linux/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.h
120 if (!MLX5_CAP_GEN(dev, vport_group_manager) || in mlx5_lag_is_supported()
121 !MLX5_CAP_GEN(dev, lag_master) || in mlx5_lag_is_supported()
122 MLX5_CAP_GEN(dev, num_lag_ports) < 2 || in mlx5_lag_is_supported()
124 MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) in mlx5_lag_is_supported()
/linux/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/
egress_ofld.c
77 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { in esw_acl_egress_ofld_rules_create()
125 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { in esw_acl_egress_ofld_groups_create()
192 !MLX5_CAP_GEN(esw->dev, prio_tag_required)) in esw_acl_egress_ofld_setup()
202 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) in esw_acl_egress_ofld_setup()
259 fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); in mlx5_esw_acl_egress_vport_bond()
/linux/include/linux/mlx5/
driver.h
1222 return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); in mlx5_core_is_ecpf_esw_manager()
1242 return MLX5_CAP_GEN(dev, vport_group_manager) && in mlx5_lag_is_lacp_owner()
1243 (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && in mlx5_lag_is_lacp_owner()
1244 MLX5_CAP_GEN(dev, lag_master); in mlx5_lag_is_lacp_owner()
1269 return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && in mlx5_core_is_mp_slave()
1270 MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; in mlx5_core_is_mp_slave()
1275 return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; in mlx5_core_is_mp_master()
1289 return MLX5_CAP_GEN(dev, native_port_num); in mlx5_core_native_port_num()
1294 int idx = MLX5_CAP_GEN(dev, native_port_num); in mlx5_get_dev_index()
1310 if (MLX5_CAP_GEN(dev, roce_rw_supported)) in mlx5_get_roce_state()
[all …]
