| /linux/drivers/infiniband/hw/mthca/ |
| H A D | mthca_main.c |
  136  static int mthca_tune_pci(struct mthca_dev *mdev)  in mthca_tune_pci() argument
  142  if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {  in mthca_tune_pci()
  143  if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {  in mthca_tune_pci()
  144  mthca_err(mdev, "Couldn't set PCI-X max read count, "  in mthca_tune_pci()
  148  } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))  in mthca_tune_pci()
  149  mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");  in mthca_tune_pci()
  151  if (pci_is_pcie(mdev->pdev)) {  in mthca_tune_pci()
  152  if (pcie_set_readrq(mdev->pdev, 4096)) {  in mthca_tune_pci()
  153  mthca_err(mdev, "Couldn't write PCI Express read request, "  in mthca_tune_pci()
  157  } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)  in mthca_tune_pci()
  [all …]
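
The mthca_tune_pci() hits above reduce to one pattern: raise the PCI-X maximum memory read byte count on PCI-X HCAs and the PCIe read request size on PCI Express ones. A minimal sketch of that flow, assuming only a valid struct pci_dev *pdev (the mthca_* logging wrappers and flag checks are left out):

	#include <linux/pci.h>

	/* Illustrative only: mirrors the tuning pattern from mthca_tune_pci(). */
	static int tune_pci_read_sizes(struct pci_dev *pdev)
	{
		if (pci_find_capability(pdev, PCI_CAP_ID_PCIX)) {
			/* PCI-X: raise max memory read byte count to the device limit. */
			if (pcix_set_mmrbc(pdev, pcix_get_max_mmrbc(pdev)))
				return -ENODEV;
		}

		if (pci_is_pcie(pdev)) {
			/* PCIe: request 4 KB read requests, as the driver above does. */
			if (pcie_set_readrq(pdev, 4096))
				return -ENODEV;
		}

		return 0;
	}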
|
| H A D | mthca_reset.c |
  41  int mthca_reset(struct mthca_dev *mdev)  in mthca_reset() argument
  69  if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE)) {  in mthca_reset()
  72  while ((bridge = pci_get_device(mdev->pdev->vendor,  in mthca_reset()
  73  mdev->pdev->device + 2,  in mthca_reset()
  76  bridge->subordinate == mdev->pdev->bus) {  in mthca_reset()
  77  mthca_dbg(mdev, "Found bridge: %s\n",  in mthca_reset()
  89  mthca_warn(mdev, "No bridge found for %s\n",  in mthca_reset()
  90  pci_name(mdev->pdev));  in mthca_reset()
  105  if (pci_read_config_dword(mdev->pdev, i * 4, hca_header + i)) {  in mthca_reset()
  107  mthca_err(mdev, "Couldn't save HCA "  in mthca_reset()
  [all …]
|
| /linux/drivers/sh/maple/ |
| H A D | maple.c |
  139  struct maple_device *mdev;  in maple_release_device() local
  142  mdev = to_maple_dev(dev);  in maple_release_device()
  143  mq = mdev->mq;  in maple_release_device()
  146  kfree(mdev);  in maple_release_device()
  157  int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,  in maple_add_packet() argument
  172  mdev->mq->command = command;  in maple_add_packet()
  173  mdev->mq->length = length;  in maple_add_packet()
  176  mdev->mq->sendbuf = sendbuf;  in maple_add_packet()
  179  list_add_tail(&mdev->mq->list, &maple_waitq);  in maple_add_packet()
  186  static struct mapleq *maple_allocq(struct maple_device *mdev)  in maple_allocq() argument
  [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| H A D | en_clock.c |
  43  struct mlx4_en_dev *mdev =  in mlx4_en_read_clock() local
  45  struct mlx4_dev *dev = mdev->dev;  in mlx4_en_read_clock()
  61  u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)  in mlx4_en_get_hwtstamp() argument
  67  seq = read_seqbegin(&mdev->clock_lock);  in mlx4_en_get_hwtstamp()
  68  nsec = timecounter_cyc2time(&mdev->clock, timestamp);  in mlx4_en_get_hwtstamp()
  69  } while (read_seqretry(&mdev->clock_lock, seq));  in mlx4_en_get_hwtstamp()
  74  void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,  in mlx4_en_fill_hwtstamps() argument
  79  hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);  in mlx4_en_fill_hwtstamps()
  88  void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)  in mlx4_en_remove_timestamp() argument
  90  if (mdev->ptp_clock) {  in mlx4_en_remove_timestamp()
  [all …]
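
The en_clock.c hits show the usual cycles-to-nanoseconds conversion: a timecounter turns the raw hardware timestamp into nanoseconds while a seqlock read loop guards against a concurrent clock update. A hedged sketch of that read side, assuming a private struct holding the seqlock_t clock_lock and struct timecounter clock seen in the hits:

	#include <linux/ktime.h>
	#include <linux/seqlock.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/timecounter.h>

	struct hwclock_state {          /* hypothetical container for the fields used above */
		seqlock_t clock_lock;
		struct timecounter clock;
	};

	static u64 hwts_to_ns(struct hwclock_state *st, u64 timestamp)
	{
		unsigned int seq;
		u64 nsec;

		do {
			seq = read_seqbegin(&st->clock_lock);
			nsec = timecounter_cyc2time(&st->clock, timestamp);
		} while (read_seqretry(&st->clock_lock, seq));

		return nsec;
	}

	static void fill_hwtstamp(struct hwclock_state *st,
				  struct skb_shared_hwtstamps *hwts, u64 timestamp)
	{
		memset(hwts, 0, sizeof(*hwts));
		hwts->hwtstamp = ns_to_ktime(hwts_to_ns(st, timestamp));
	}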
|
| H A D | en_main.c |
  96  level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),  in en_print()
  117  if (mlx4_is_mfunc(priv->mdev->dev) &&  in mlx4_en_update_loopback_state()
  124  if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)  in mlx4_en_update_loopback_state()
  127  mutex_lock(&priv->mdev->state_lock);  in mlx4_en_update_loopback_state()
  128  if ((priv->mdev->dev->caps.flags2 &  in mlx4_en_update_loopback_state()
  145  mlx4_warn(priv->mdev, "failed to change mcast loopback\n");  in mlx4_en_update_loopback_state()
  147  mutex_unlock(&priv->mdev->state_lock);  in mlx4_en_update_loopback_state()
  150  static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)  in mlx4_en_get_profile() argument
  152  struct mlx4_en_profile *params = &mdev->profile;  in mlx4_en_get_profile()
  160  if (params->udp_rss && !(mdev->dev->caps.flags  in mlx4_en_get_profile()
  [all …]
|
| /linux/drivers/most/ |
| H A D | most_usb.c |
  77  struct most_dev *mdev;  member
  238  struct most_dev *mdev = to_mdev(iface);  in hdm_poison_channel() local
  243  dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");  in hdm_poison_channel()
  247  lock = mdev->channel_lock + channel;  in hdm_poison_channel()
  249  mdev->is_channel_healthy[channel] = false;  in hdm_poison_channel()
  252  cancel_work_sync(&mdev->clear_work[channel].ws);  in hdm_poison_channel()
  254  mutex_lock(&mdev->io_mutex);  in hdm_poison_channel()
  255  usb_kill_anchored_urbs(&mdev->busy_urbs[channel]);  in hdm_poison_channel()
  256  if (mdev->padding_active[channel])  in hdm_poison_channel()
  257  mdev->padding_active[channel] = false;  in hdm_poison_channel()
  [all …]
|
| /linux/drivers/mtd/maps/ |
| H A D | vmu-flash.c |
  24  struct maple_device *mdev;  member
  60  struct maple_device *mdev;  in ofs_to_block() local
  66  mdev = mpart->mdev;  in ofs_to_block()
  67  card = maple_get_drvdata(mdev);  in ofs_to_block()
  91  struct maple_device *mdev;  in vmu_blockread() local
  94  mdev = mq->dev;  in vmu_blockread()
  95  card = maple_get_drvdata(mdev);  in vmu_blockread()
  114  struct maple_device *mdev;  in maple_vmu_read_block() local
  121  mdev = mpart->mdev;  in maple_vmu_read_block()
  123  card = maple_get_drvdata(mdev);  in maple_vmu_read_block()
  [all …]
|
| /linux/drivers/crypto/intel/qat/qat_common/ |
| H A D | qat_mig_dev.c |
  15  struct qat_mig_dev *mdev;  in qat_vfmig_create() local
  27  mdev = kmalloc(sizeof(*mdev), GFP_KERNEL);  in qat_vfmig_create()
  28  if (!mdev)  in qat_vfmig_create()
  31  mdev->vf_id = vf_id;  in qat_vfmig_create()
  32  mdev->parent_accel_dev = accel_dev;  in qat_vfmig_create()
  34  return mdev;  in qat_vfmig_create()
  38  int qat_vfmig_init(struct qat_mig_dev *mdev)  in qat_vfmig_init() argument
  40  struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;  in qat_vfmig_init()
  42  return GET_VFMIG_OPS(accel_dev)->init(mdev);  in qat_vfmig_init()
  46  void qat_vfmig_cleanup(struct qat_mig_dev *mdev)  in qat_vfmig_cleanup() argument
  [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| H A D | clock.c |
  83  struct mlx5_core_dev *mdev;  member
  91  struct mlx5_core_dev *mdev;  member
  113  return clock_priv(clock)->mdev;  in mlx5_clock_mdev_get()
  132  static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)  in mlx5_real_time_mode() argument
  134  return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));  in mlx5_real_time_mode()
  137  static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev)  in mlx5_npps_real_time_supported() argument
  139  return (mlx5_real_time_mode(mdev) &&  in mlx5_npps_real_time_supported()
  140  MLX5_CAP_MCAM_FEATURE(mdev, npps_period) &&  in mlx5_npps_real_time_supported()
  141  MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns));  in mlx5_npps_real_time_supported()
  144  static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)  in mlx5_modify_mtutc_allowed() argument
  [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/en/ |
| H A D | params.c |
  17  static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)  in mlx5e_mpwrq_min_page_shift() argument
  19  u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size);  in mlx5e_mpwrq_min_page_shift()
  24  u8 mlx5e_mpwrq_page_shift(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)  in mlx5e_mpwrq_page_shift() argument
  27  u8 min_page_shift = mlx5e_mpwrq_min_page_shift(mdev);  in mlx5e_mpwrq_page_shift()
  37  mlx5e_mpwrq_umr_mode(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk)  in mlx5e_mpwrq_umr_mode() argument
  48  u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);  in mlx5e_mpwrq_umr_mode()
  106  u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,  in mlx5e_mpwrq_log_wqe_sz() argument
  115  max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;  in mlx5e_mpwrq_log_wqe_sz()
  122  max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ?  in mlx5e_mpwrq_log_wqe_sz()
  128  u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift,  in mlx5e_mpwrq_pages_per_wqe() argument
  [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/ |
| H A D | driver.c |
  28  struct mlx5_core_dev *mdev;  in mlx5_sf_dev_probe() local
  36  mdev = devlink_priv(devlink);  in mlx5_sf_dev_probe()
  37  mdev->device = &adev->dev;  in mlx5_sf_dev_probe()
  38  mdev->pdev = sf_dev->parent_mdev->pdev;  in mlx5_sf_dev_probe()
  39  mdev->bar_addr = sf_dev->bar_base_addr;  in mlx5_sf_dev_probe()
  40  mdev->iseg_base = sf_dev->bar_base_addr;  in mlx5_sf_dev_probe()
  41  mdev->coredev_type = MLX5_COREDEV_SF;  in mlx5_sf_dev_probe()
  42  mdev->priv.parent_mdev = sf_dev->parent_mdev;  in mlx5_sf_dev_probe()
  43  mdev->priv.adev_idx = adev->id;  in mlx5_sf_dev_probe()
  44  sf_dev->mdev = mdev;  in mlx5_sf_dev_probe()
  [all …]
|
| /linux/net/mctp/ |
| H A D | device.c |
  32  struct mctp_dev *mdev = rcu_dereference(dev->mctp_ptr);  in __mctp_dev_get() local
  37  if (mdev)  in __mctp_dev_get()
  38  if (!refcount_inc_not_zero(&mdev->refs))  in __mctp_dev_get()
  40  return mdev;  in __mctp_dev_get()
  61  struct mctp_dev *mdev, mctp_eid_t eid,  in mctp_fill_addrinfo() argument
  77  hdr->ifa_index = mdev->dev->ifindex;  in mctp_fill_addrinfo()
  94  static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,  in mctp_dump_dev_addrinfo() argument
  103  for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {  in mctp_dump_dev_addrinfo()
  104  rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],  in mctp_dump_dev_addrinfo()
  119  struct mctp_dev *mdev;  in mctp_dump_addrinfo() local
  [all …]
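
The __mctp_dev_get() hits illustrate a common lookup idiom: dereference an RCU-protected pointer, then keep it only if a reference can still be taken with refcount_inc_not_zero(). A small sketch of the idiom under assumed names (the device layout is reduced to just a refcount):

	#include <linux/rcupdate.h>
	#include <linux/refcount.h>

	struct example_dev {            /* stand-in for struct mctp_dev */
		refcount_t refs;
	};

	/* Caller must hold rcu_read_lock(); returns a referenced object or NULL. */
	static struct example_dev *example_dev_get(struct example_dev __rcu **slot)
	{
		struct example_dev *d = rcu_dereference(*slot);

		/* A zero refcount means the object is already being torn down. */
		if (d && !refcount_inc_not_zero(&d->refs))
			d = NULL;

		return d;
	}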
|
| /linux/drivers/virtio/ |
| H A D | virtio_pci_modern_dev.c |
  22  vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,  in vp_modern_map_capability() argument
  26  struct pci_dev *dev = mdev->pci_dev;  in vp_modern_map_capability()
  40  if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {  in vp_modern_map_capability()
  223  int vp_modern_probe(struct virtio_pci_modern_device *mdev)  in vp_modern_probe() argument
  225  struct pci_dev *pci_dev = mdev->pci_dev;  in vp_modern_probe()
  233  if (mdev->device_id_check) {  in vp_modern_probe()
  234  devid = mdev->device_id_check(pci_dev);  in vp_modern_probe()
  237  mdev->id.device = devid;  in vp_modern_probe()
  247  mdev->id.device = pci_dev->subsystem_device;  in vp_modern_probe()
  250  mdev->id.device = pci_dev->device - 0x1040;  in vp_modern_probe()
  [all …]
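
The vp_modern_probe() hits at lines 247 and 250 encode the virtio-over-PCI ID convention: transitional devices (PCI device IDs below 0x1040) carry the virtio device type in the PCI subsystem device ID, while modern devices use PCI device ID 0x1040 plus the virtio device type. A hypothetical helper making that mapping explicit (not part of the virtio_pci_modern API):

	#include <linux/pci.h>

	static u32 virtio_device_type(const struct pci_dev *pci_dev)
	{
		if (pci_dev->device < 0x1040)
			return pci_dev->subsystem_device;  /* transitional device */

		return pci_dev->device - 0x1040;           /* modern device */
	}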
|
| /linux/drivers/media/mc/ |
| H A D | mc-device.c |
  81  static struct media_entity *find_entity(struct media_device *mdev, u32 id)  in find_entity() argument
  88  media_device_for_each_entity(entity, mdev) {  in find_entity()
  98  static long media_device_enum_entities(struct media_device *mdev, void *arg)  in media_device_enum_entities() argument
  103  ent = find_entity(mdev, entd->id);  in media_device_enum_entities()
  151  static long media_device_enum_links(struct media_device *mdev, void *arg)  in media_device_enum_links() argument
  156  entity = find_entity(mdev, links->entity);  in media_device_enum_links()
  200  static long media_device_setup_link(struct media_device *mdev, void *arg)  in media_device_setup_link() argument
  209  source = find_entity(mdev, linkd->source.entity);  in media_device_setup_link()
  210  sink = find_entity(mdev, linkd->sink.entity);  in media_device_setup_link()
  230  static long media_device_get_topology(struct media_device *mdev, void *arg)  in media_device_get_topology() argument
  [all …]
|
| /linux/drivers/mailbox/ |
| H A D | mailbox-sti.c |
  36  #define MBOX_BASE(mdev, inst) ((mdev)->base + ((inst) * 4))  argument
  81  struct sti_mbox_device *mdev;  member
  89  struct sti_mbox_device *mdev = chan_info->mdev;  in sti_mbox_channel_is_enabled() local
  93  return mdev->enabled[instance] & BIT(channel);  in sti_mbox_channel_is_enabled()
  122  struct sti_mbox_device *mdev = chan_info->mdev;  in sti_mbox_enable_channel() local
  126  void __iomem *base = MBOX_BASE(mdev, instance);  in sti_mbox_enable_channel()
  128  spin_lock_irqsave(&mdev->lock, flags);  in sti_mbox_enable_channel()
  129  mdev->enabled[instance] |= BIT(channel);  in sti_mbox_enable_channel()
  131  spin_unlock_irqrestore(&mdev->lock, flags);  in sti_mbox_enable_channel()
  137  struct sti_mbox_device *mdev = chan_info->mdev;  in sti_mbox_disable_channel() local
  [all …]
|
| /linux/drivers/dma/qcom/ |
| H A D | hidma_mgmt_sys.c |
  14  struct hidma_mgmt_dev *mdev;  member
  22  int (*get)(struct hidma_mgmt_dev *mdev);
  23  int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
  27  static int get_##name(struct hidma_mgmt_dev *mdev) \
  29  return mdev->name; \
  31  static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \
  36  tmp = mdev->name; \
  37  mdev->name = val; \
  38  rc = hidma_mgmt_setup(mdev); \
  40  mdev->name = tmp; \
  [all …]
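
hidma_mgmt_sys.c generates its per-attribute accessors with a token-pasting macro: one invocation emits both get_<name>() and set_<name>(), and the setter restores the previous value when reconfiguration fails. The same pattern as a self-contained user-space sketch with made-up names (reconfigure() stands in for hidma_mgmt_setup()):

	#include <stdio.h>

	struct cfg {
		unsigned long max_read_request;
	};

	static int reconfigure(struct cfg *c)
	{
		return c->max_read_request <= 4096 ? 0 : -1;
	}

	#define CFG_ATTR(name)                                           \
	static unsigned long get_##name(struct cfg *c)                   \
	{                                                                \
		return c->name;                                          \
	}                                                                \
	static int set_##name(struct cfg *c, unsigned long val)          \
	{                                                                \
		unsigned long tmp = c->name;                             \
		int rc;                                                  \
									 \
		c->name = val;                                           \
		rc = reconfigure(c);                                     \
		if (rc)                                                  \
			c->name = tmp;  /* roll back on failure */       \
		return rc;                                               \
	}

	CFG_ATTR(max_read_request)

	int main(void)
	{
		struct cfg c = { .max_read_request = 512 };

		if (set_max_read_request(&c, 8192))
			printf("rejected, still %lu\n", get_max_read_request(&c));
		return 0;
	}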
|
| /linux/include/media/ |
| H A D | media-device.h |
  69  struct media_request *(*req_alloc)(struct media_device *mdev);
  217  void media_device_init(struct media_device *mdev);
  227  void media_device_cleanup(struct media_device *mdev);
  270  int __must_check __media_device_register(struct media_device *mdev,
  282  #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE)  argument
  292  void media_device_unregister(struct media_device *mdev);
  330  int __must_check media_device_register_entity(struct media_device *mdev,
  367  void media_device_register_entity_notify(struct media_device *mdev,
  378  void media_device_unregister_entity_notify(struct media_device *mdev,
  382  #define media_device_for_each_entity(entity, mdev) \  argument
  [all …]
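
media-device.h pairs its entry points as init/register on one side and unregister/cleanup on the other, and the macro at line 382 is the iterator drivers use to walk registered entities. A hedged usage sketch, assuming a bridge driver that already has a parent struct device:

	#include <linux/module.h>
	#include <linux/printk.h>
	#include <linux/string.h>
	#include <media/media-device.h>

	static struct media_device example_mdev;

	static int example_register(struct device *parent)
	{
		int ret;

		example_mdev.dev = parent;        /* parent device must be set before init */
		strscpy(example_mdev.model, "example", sizeof(example_mdev.model));

		media_device_init(&example_mdev);
		ret = media_device_register(&example_mdev);
		if (ret)
			media_device_cleanup(&example_mdev);
		return ret;
	}

	static void example_unregister(void)
	{
		struct media_entity *entity;

		/* Walk everything registered against this media device. */
		media_device_for_each_entity(entity, &example_mdev)
			pr_info("entity: %s\n", entity->name);

		media_device_unregister(&example_mdev);
		media_device_cleanup(&example_mdev);
	}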
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| H A D | wc.c |
  28  struct mlx5_core_dev *mdev;  member
  48  static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,  in mlx5_wc_create_cqwq() argument
  56  err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);  in mlx5_wc_create_cqwq()
  70  cq->mdev = mdev;  in mlx5_wc_create_cqwq()
  78  struct mlx5_core_dev *mdev = cq->mdev;  in create_wc_cq() local
  83  err = mlx5_comp_eqn_get(mdev, 0, &eqn);  in create_wc_cq()
  102  MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);  in create_wc_cq()
  107  err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));  in create_wc_cq()
  114  static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)  in mlx5_wc_create_cq() argument
  124  MLX5_SET(cqc, cqc, uar_page, mdev->priv.bfreg.up->index);  in mlx5_wc_create_cq()
  [all …]
|
| H A D | wq.c |
  37  int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,  in mlx5_wq_cyc_create() argument
  46  err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);  in mlx5_wq_cyc_create()
  48  mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);  in mlx5_wq_cyc_create()
  54  err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),  in mlx5_wq_cyc_create()
  57  mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);  in mlx5_wq_cyc_create()
  64  wq_ctrl->mdev = mdev;  in mlx5_wq_cyc_create()
  69  mlx5_db_free(mdev, &wq_ctrl->db);  in mlx5_wq_cyc_create()
  99  int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,  in mlx5_wq_qp_create() argument
  113  err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);  in mlx5_wq_qp_create()
  115  mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);  in mlx5_wq_qp_create()
  [all …]
|
| H A D | uar.c |
  61  static int uars_per_sys_page(struct mlx5_core_dev *mdev)  in uars_per_sys_page() argument
  63  if (MLX5_CAP_GEN(mdev, uar_4k))  in uars_per_sys_page()
  64  return MLX5_CAP_GEN(mdev, num_of_uars_per_page);  in uars_per_sys_page()
  69  static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)  in uar2pfn() argument
  73  if (MLX5_CAP_GEN(mdev, uar_4k))  in uar2pfn()
  78  return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;  in uar2pfn()
  87  if (mlx5_cmd_free_uar(up->mdev, up->index))  in up_rel_func()
  88  mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);  in up_rel_func()
  94  static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,  in alloc_uars_page() argument
  104  bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;  in alloc_uars_page()
  [all …]
|
| /linux/drivers/net/ethernet/marvell/octeontx2/af/ |
| H A D | mbox.c |
  23  struct otx2_mbox_dev *mdev = &mbox->dev[devid];  in __otx2_mbox_reset() local
  25  void *hw_mbase = mdev->hwbase;  in __otx2_mbox_reset()
  30  mdev->msg_size = 0;  in __otx2_mbox_reset()
  31  mdev->rsp_size = 0;  in __otx2_mbox_reset()
  43  struct otx2_mbox_dev *mdev = &mbox->dev[devid];  in otx2_mbox_reset() local
  45  spin_lock(&mdev->mbox_lock);  in otx2_mbox_reset()
  47  spin_unlock(&mdev->mbox_lock);  in otx2_mbox_reset()
  227  struct otx2_mbox_dev *mdev;  in otx2_mbox_init() local
  237  mdev = &mbox->dev[devid];  in otx2_mbox_init()
  238  mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);  in otx2_mbox_init()
  [all …]
|
| /linux/include/linux/ |
| H A D | virtio_pci_modern.h |
  100  vp_modern_get_driver_extended_features(struct virtio_pci_modern_device *mdev,
  102  void vp_modern_get_extended_features(struct virtio_pci_modern_device *mdev,
  104  void vp_modern_set_extended_features(struct virtio_pci_modern_device *mdev,
  108  vp_modern_get_features(struct virtio_pci_modern_device *mdev)  in vp_modern_get_features() argument
  112  vp_modern_get_extended_features(mdev, features_array);  in vp_modern_get_features()
  117  vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)  in vp_modern_get_driver_features() argument
  122  vp_modern_get_driver_extended_features(mdev, features_array);  in vp_modern_get_driver_features()
  129  vp_modern_set_features(struct virtio_pci_modern_device *mdev, u64 features)  in vp_modern_set_features() argument
  134  vp_modern_set_extended_features(mdev, features_array);  in vp_modern_set_features()
  137  u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
  [all …]
|
| /linux/drivers/w1/masters/ |
| H A D | mxc_w1.c |
  93  struct mxc_w1_device *mdev;  in mxc_w1_probe() local
  98  mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),  in mxc_w1_probe()
  100  if (!mdev)  in mxc_w1_probe()
  103  mdev->clk = devm_clk_get(&pdev->dev, NULL);  in mxc_w1_probe()
  104  if (IS_ERR(mdev->clk))  in mxc_w1_probe()
  105  return PTR_ERR(mdev->clk);  in mxc_w1_probe()
  107  err = clk_prepare_enable(mdev->clk);  in mxc_w1_probe()
  111  clkrate = clk_get_rate(mdev->clk);  in mxc_w1_probe()
  122  mdev->regs = devm_platform_ioremap_resource(pdev, 0);  in mxc_w1_probe()
  123  if (IS_ERR(mdev->regs)) {  in mxc_w1_probe()
  [all …]
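
The mxc_w1_probe() hits follow the standard managed-resource probe shape: devm_kzalloc for the private struct, devm_clk_get plus clk_prepare_enable for the clock, and devm_platform_ioremap_resource for the register window. A condensed sketch of that shape with an assumed private struct (error handling cut down to the essentials):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>

	struct example_w1_dev {         /* assumed layout, mirroring the fields used above */
		struct clk *clk;
		void __iomem *regs;
	};

	static int example_w1_probe(struct platform_device *pdev)
	{
		struct example_w1_dev *mdev;
		int err;

		mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
		if (!mdev)
			return -ENOMEM;

		mdev->clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(mdev->clk))
			return PTR_ERR(mdev->clk);

		err = clk_prepare_enable(mdev->clk);
		if (err)
			return err;

		mdev->regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(mdev->regs)) {
			clk_disable_unprepare(mdev->clk);
			return PTR_ERR(mdev->regs);
		}

		platform_set_drvdata(pdev, mdev);
		return 0;
	}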
|
| /linux/include/linux/mlx5/ |
| H A D | vport.h |
  39  #define MLX5_VPORT_MANAGER(mdev) \  argument
  40  (MLX5_CAP_GEN(mdev, vport_group_manager) && \
  41  (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
  42  mlx5_core_is_pf(mdev))
  58  u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
  59  int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
  61  int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
  63  int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
  64  int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
  66  void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
  [all …]
|
| /linux/drivers/infiniband/hw/mlx5/ |
| H A D | main.c |
  113  int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);  in mlx5_ib_port_link_layer()
  211  struct mlx5_core_dev *mdev;  in mlx5_netdev_event() local
  215  mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);  in mlx5_netdev_event()
  216  if (!mdev)  in mlx5_netdev_event()
  230  if (ndev->dev.parent == mdev->device)  in mlx5_netdev_event()
  249  !mlx5_core_mp_enabled(mdev))  in mlx5_netdev_event()
  252  if (mlx5_lag_is_roce(mdev) || mlx5_lag_is_sriov(mdev)) {  in mlx5_netdev_event()
  255  if(mlx5_lag_is_roce(mdev))  in mlx5_netdev_event()
  317  struct mlx5_core_dev *mdev = NULL;  in mlx5_ib_get_native_port_mdev() local
  325  return ibdev->mdev;  in mlx5_ib_get_native_port_mdev()
  [all …]
|