Searched refs:peer_dev (Results 1 – 7 of 7) sorted by relevance

/linux/drivers/infiniband/hw/mlx5/
ib_rep.c
33 struct mlx5_core_dev *peer_dev; in mlx5_ib_num_ports_update() local
36 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_ib_num_ports_update()
37 u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev); in mlx5_ib_num_ports_update()
39 if (mlx5_lag_is_mpesw(peer_dev)) in mlx5_ib_num_ports_update()
78 struct mlx5_core_dev *peer_dev; in mlx5_ib_release_transport() local
81 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_ib_release_transport()
82 ret = mlx5_ib_set_owner_transport(peer_dev, peer_dev); in mlx5_ib_release_transport()
89 struct mlx5_core_dev *peer_dev; in mlx5_ib_take_transport() local
93 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_ib_take_transport()
94 ret = mlx5_ib_set_owner_transport(peer_dev, dev); in mlx5_ib_take_transport()
[all …]
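The hits above all revolve around the mlx5_lag_for_each_peer_mdev() iterator. Below is a minimal sketch of that pattern, assuming the public mlx5 headers and loosely modelled on mlx5_ib_num_ports_update(); the function name and the exact port accounting are illustrative, not the upstream logic.

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>

/* Hypothetical helper: sum vports across a device and all of its LAG peers. */
static u32 sketch_count_lag_vports(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer_dev;
	u32 num_ports = mlx5_eswitch_get_total_vports(dev);
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
		u32 peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);

		/* Assumption: an MPESW peer contributes a single port. */
		if (mlx5_lag_is_mpesw(peer_dev))
			peer_num_ports = 1;
		num_ports += peer_num_ports;
	}
	return num_ports;
}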
/linux/drivers/net/netdevsim/
netdev.c
43 struct net_device *peer_dev; in nsim_start_peer_tx_queue() local
55 peer_dev = peer_ns->netdev; in nsim_start_peer_tx_queue()
56 if (dev->real_num_tx_queues != peer_dev->num_rx_queues) in nsim_start_peer_tx_queue()
59 txq = netdev_get_tx_queue(peer_dev, idx); in nsim_start_peer_tx_queue()
124 struct net_device *peer_dev; in nsim_start_xmit() local
139 peer_dev = dev; in nsim_start_xmit()
144 peer_dev = peer_ns->netdev; in nsim_start_xmit()
152 if (rxq >= peer_dev->num_rx_queues) in nsim_start_xmit()
153 rxq = rxq % peer_dev->num_rx_queues; in nsim_start_xmit()
156 cfg = peer_dev->cfg; in nsim_start_xmit()
[all …]
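The nsim_start_xmit() hit shows how netdevsim maps a local tx queue onto the peer device's rx queues. A small sketch of that clamping step, using only core netdevice fields; the function name is hypothetical.

#include <linux/netdevice.h>

/* Pick an rx queue on the peer for a given local tx queue index. */
static unsigned int sketch_peer_rxq(const struct net_device *peer_dev,
				    unsigned int txq_idx)
{
	unsigned int rxq = txq_idx;

	/* The peer may expose fewer rx queues than we have tx queues. */
	if (rxq >= peer_dev->num_rx_queues)
		rxq = rxq % peer_dev->num_rx_queues;
	return rxq;
}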
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
clock.c
1438 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_shared_clock_register() local
1455 mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { in mlx5_shared_clock_register()
1456 if (peer_dev->clock) { in mlx5_shared_clock_register()
1457 next = peer_dev; in mlx5_shared_clock_register()
1479 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_shared_clock_unregister() local
1484 mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { in mlx5_shared_clock_unregister()
1485 if (peer_dev->clock && peer_dev != mdev) { in mlx5_shared_clock_unregister()
1486 next = peer_dev; in mlx5_shared_clock_unregister()
1562 struct mlx5_core_dev *peer_dev, *next = NULL; in mlx5_clock_unload() local
1575 mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { in mlx5_clock_unload()
[all …]
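These hits walk the devcom component with mlx5_devcom_for_each_peer_entry() to find a peer that already owns the shared clock. A sketch of that walk, assuming the driver-internal clock_state->compdev handle from the excerpt and the devcom cursor type used by the mlx5 core; the wrapper name is hypothetical.

/* Return the first devcom peer that already registered a clock. */
static struct mlx5_core_dev *sketch_find_clock_owner(struct mlx5_core_dev *mdev)
{
	struct mlx5_devcom_comp_dev *pos; /* assumption: current devcom cursor type */
	struct mlx5_core_dev *peer_dev, *next = NULL;

	mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) {
		if (peer_dev->clock) {
			next = peer_dev;
			break;
		}
	}
	return next;
}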
/linux/drivers/net/ethernet/mellanox/mlx5/core/
eswitch_offloads.c
1128 struct mlx5_core_dev *peer_dev, in peer_miss_rules_setup() argument
1146 MLX5_CAP_GEN(peer_dev, vhca_id)); in peer_miss_rules_setup()
1158 dest->vport.num = peer_dev->priv.eswitch->manager_vport; in peer_miss_rules_setup()
1159 dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); in peer_miss_rules_setup()
1184 struct mlx5_core_dev *peer_dev) in esw_add_fdb_peer_miss_rules() argument
1186 struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; in esw_add_fdb_peer_miss_rules()
1197 if (!MLX5_VPORT_MANAGER(peer_dev) && in esw_add_fdb_peer_miss_rules()
1198 !mlx5_core_is_ecpf_esw_manager(peer_dev)) in esw_add_fdb_peer_miss_rules()
1205 peer_miss_rules_setup(esw, peer_dev, spec, &dest); in esw_add_fdb_peer_miss_rules()
1217 if (mlx5_core_is_ecpf_esw_manager(peer_dev) && in esw_add_fdb_peer_miss_rules()
[all …]
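peer_miss_rules_setup() builds a flow destination that forwards to the peer eswitch's manager vport, keyed by the peer's vhca_id. A sketch of just that destination setup, assuming <linux/mlx5/fs.h> and the eswitch-internal manager_vport field shown in the excerpt; the helper name is hypothetical.

#include <linux/mlx5/fs.h>

/* Point a flow destination at the peer device's eswitch manager vport. */
static void sketch_peer_miss_dest(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_destination *dest)
{
	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	/* The vhca_id is only honoured when this flag is set. */
	dest->vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
}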
fs_cmd.c
252 struct mlx5_core_dev *peer_dev; in mlx5_cmd_update_root_ft() local
255 mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) { in mlx5_cmd_update_root_ft()
256 err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, in mlx5_cmd_update_root_ft()
259 mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) { in mlx5_cmd_update_root_ft()
261 mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, in mlx5_cmd_update_root_ft()
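mlx5_cmd_update_root_ft() uses a set-then-rollback shape: program each LAG peer's slave root FDB and, on failure, walk the already-programmed peers again to restore them. A sketch of that shape; the fourth argument to mlx5_cmd_set_slave_root_fdb() is truncated in the excerpt, so the ft_id passed below is an assumption and the rollback values are purely illustrative.

/* Hypothetical wrapper showing the program-then-rollback loop. */
static int sketch_update_peer_root_fdb(struct mlx5_core_dev *dev, u32 ft_id,
				       bool disconnect)
{
	struct mlx5_core_dev *peer_dev;
	int err, i, j;

	mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
		err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect, ft_id);
		if (!err)
			continue;
		/* Roll back only the peers programmed before the failure. */
		mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
			if (j >= i)
				break;
			mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1, ft_id);
		}
		return err;
	}
	return 0;
}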
/linux/drivers/gpu/drm/amd/amdkfd/
kfd_topology.c
1323 struct kfd_topology_device *peer_dev; in kfd_fill_iolink_non_crat_info() local
1332 peer_dev = kfd_topology_device_by_proximity_domain( in kfd_fill_iolink_non_crat_info()
1335 if (!peer_dev) in kfd_fill_iolink_non_crat_info()
1339 if (!peer_dev->gpu && in kfd_fill_iolink_non_crat_info()
1347 peer_dev->node_props.hive_id = dev->node_props.hive_id; in kfd_fill_iolink_non_crat_info()
1350 list_for_each_entry(inbound_link, &peer_dev->io_link_props, in kfd_fill_iolink_non_crat_info()
1356 kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); in kfd_fill_iolink_non_crat_info()
1357 kfd_set_iolink_non_coherent(peer_dev, link, inbound_link); in kfd_fill_iolink_non_crat_info()
1358 kfd_set_recommended_sdma_engines(peer_dev, link, inbound_link); in kfd_fill_iolink_non_crat_info()
1366 peer_dev = kfd_topology_device_by_proximity_domain( in kfd_fill_iolink_non_crat_info()
[all …]
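The kfd hits resolve a peer topology device from each io_link's destination proximity domain and skip links whose peer is absent. A sketch of that lookup-and-skip pattern, assuming the kfd_topology internals visible in the excerpt (io_link_props, node_to, node_props.hive_id); the function name is hypothetical and the extra conditions in the original are omitted.

/* Propagate a GPU's hive_id to CPU-only peers reached via io links. */
static void sketch_propagate_hive_id(struct kfd_topology_device *dev)
{
	struct kfd_iolink_properties *link;
	struct kfd_topology_device *peer_dev;

	list_for_each_entry(link, &dev->io_link_props, list) {
		peer_dev = kfd_topology_device_by_proximity_domain(link->node_to);
		if (!peer_dev)
			continue;
		if (!peer_dev->gpu)
			peer_dev->node_props.hive_id = dev->node_props.hive_id;
	}
}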
/linux/drivers/net/ethernet/mellanox/mlx5/core/lag/
lag.c
1720 struct mlx5_core_dev *peer_dev = NULL; in mlx5_lag_get_next_peer_mdev() local
1742 peer_dev = ldev->pf[idx].dev; in mlx5_lag_get_next_peer_mdev()
1746 return peer_dev; in mlx5_lag_get_next_peer_mdev()
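This last hit is the producer side: mlx5_lag_get_next_peer_mdev() is the helper behind the mlx5_lag_for_each_peer_mdev() loops seen in the other results. A trivial usage sketch; the function name and log message are illustrative.

#include <linux/mlx5/driver.h>

/* Walk every LAG peer of a device and log it. */
static void sketch_walk_lag_peers(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer_dev;
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer_dev, i)
		dev_dbg(dev->device, "LAG peer %d: %s\n", i,
			dev_name(peer_dev->device));
}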