Searched full:viommu (Results 1 – 7 of 7) sorted by relevance

/linux/drivers/iommu/virtio-iommu.c
   65: struct viommu_dev *viommu;  (member)
   77: struct viommu_dev *viommu;  (member)
  137: static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,  (argument of viommu_get_write_desc_offset())
  144: return len - viommu->probe_size - tail_size;  (in viommu_get_write_desc_offset())
  155: static int __viommu_sync_req(struct viommu_dev *viommu)  (argument of __viommu_sync_req())
  160: struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];  (in __viommu_sync_req())
  162: assert_spin_locked(&viommu->request_lock);  (in __viommu_sync_req())
  166: while (!list_empty(&viommu->requests)) {  (in __viommu_sync_req())
  188: static int viommu_sync_req(struct viommu_dev *viommu)  (argument of viommu_sync_req())
  193: spin_lock_irqsave(&viommu->request_lock, flags);  (in viommu_sync_req())
[all …]
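
The virtio-iommu.c hits trace the driver's request synchronization: requests are queued on viommu->requests under request_lock, __viommu_sync_req() drains that list while asserting the lock is held, and viommu_sync_req() is the wrapper that actually takes the lock. A minimal sketch of that locked-wrapper pattern, using hypothetical names (my_dev, my_sync) rather than the driver's real structures:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct my_req {
            struct list_head list;
    };

    /* Hypothetical device, modeled on the viommu_dev fields seen above. */
    struct my_dev {
            spinlock_t request_lock;
            struct list_head requests;      /* pending requests */
    };

    /* Drain loop: the caller must hold request_lock, which is what
     * __viommu_sync_req() documents with assert_spin_locked(). */
    static int __my_sync(struct my_dev *dev)
    {
            assert_spin_locked(&dev->request_lock);

            while (!list_empty(&dev->requests)) {
                    struct my_req *req = list_first_entry(&dev->requests,
                                                          struct my_req, list);

                    /* The real driver pops completions off a virtqueue here. */
                    list_del(&req->list);
            }
            return 0;
    }

    /* Locking wrapper, mirroring viommu_sync_req(). */
    static int my_sync(struct my_dev *dev)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&dev->request_lock, flags);
            ret = __my_sync(dev);
            spin_unlock_irqrestore(&dev->request_lock, flags);
            return ret;
    }
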
/linux/drivers/acpi/viot.c
   48: struct viot_iommu *viommu;  (member)
   77: static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,  (argument of viot_get_pci_iommu_fwnode())
  103: viommu->fwnode = dev_fwnode(&pdev->dev);  (in viot_get_pci_iommu_fwnode())
  108: static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,  (argument of viot_get_mmio_iommu_fwnode())
  123: viommu->fwnode = &adev->fwnode;  (in viot_get_mmio_iommu_fwnode())
  130: struct viot_iommu *viommu;  (local in viot_get_iommu())
  138: list_for_each_entry(viommu, &viot_iommus, list)  (in viot_get_iommu())
  139: if (viommu->offset == offset)  (in viot_get_iommu())
  140: return viommu;  (in viot_get_iommu())
  145: viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);  (in viot_get_iommu())
[all …]
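
viot.c parses the ACPI VIOT table, and viot_get_iommu() is a find-or-allocate lookup: it walks the viot_iommus list for an entry with a matching table offset and kzalloc()s a fresh one on a miss. A generic sketch of that shape, with hypothetical my_iommu/my_get_iommu names:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical element keyed by table offset, like struct viot_iommu. */
    struct my_iommu {
            unsigned int offset;
            struct list_head list;
    };

    static LIST_HEAD(my_iommus);

    static struct my_iommu *my_get_iommu(unsigned int offset)
    {
            struct my_iommu *iommu;

            /* Return an existing node if the offset is already known... */
            list_for_each_entry(iommu, &my_iommus, list)
                    if (iommu->offset == offset)
                            return iommu;

            /* ...otherwise allocate, initialize and register a new one. */
            iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
            if (!iommu)
                    return NULL;

            iommu->offset = offset;
            list_add(&iommu->list, &my_iommus);
            return iommu;
    }
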
/linux/drivers/iommu/iommufd/eventq.c
  226: struct iommufd_viommu *viommu = veventq->viommu;  (local in iommufd_veventq_abort())
  229: lockdep_assert_held_write(&viommu->veventqs_rwsem);  (in iommufd_veventq_abort())
  237: refcount_dec(&viommu->obj.users);  (in iommufd_veventq_abort())
  246: down_write(&veventq->viommu->veventqs_rwsem);  (in iommufd_veventq_destroy())
  248: up_write(&veventq->viommu->veventqs_rwsem);  (in iommufd_veventq_destroy())
  489: struct iommufd_viommu *viommu;  (local in iommufd_veventq_alloc())
  499: viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);  (in iommufd_veventq_alloc())
  500: if (IS_ERR(viommu))  (in iommufd_veventq_alloc())
  501: return PTR_ERR(viommu);  (in iommufd_veventq_alloc())
  503: down_write(&viommu->veventqs_rwsem);  (in iommufd_veventq_alloc())
[all …]
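
The eventq.c lines outline the veventq lifecycle: allocation resolves the parent vIOMMU from a user-supplied ID (iommufd_get_viommu() returns an ERR_PTR on failure), links the new queue while holding veventqs_rwsem for writing, and abort/destroy later drop the reference under the same lock. A sketch of that resolve-then-locked-attach shape, with hypothetical my_parent/my_child names standing in for the iommufd objects:

    #include <linux/rwsem.h>
    #include <linux/list.h>
    #include <linux/err.h>
    #include <linux/refcount.h>

    struct my_parent {
            struct rw_semaphore children_rwsem;
            struct list_head children;
            refcount_t users;
    };

    static struct my_parent the_parent = {
            .children_rwsem = __RWSEM_INITIALIZER(the_parent.children_rwsem),
            .children = LIST_HEAD_INIT(the_parent.children),
            .users = REFCOUNT_INIT(1),
    };

    /* Stand-in for iommufd_get_viommu(): a real lookup resolves the ID in
     * an object table and returns ERR_PTR(-ENOENT) when it is unknown. */
    static struct my_parent *my_get_parent(u32 id)
    {
            return id == 1 ? &the_parent : ERR_PTR(-ENOENT);
    }

    static int my_child_alloc(u32 parent_id, struct list_head *child)
    {
            struct my_parent *parent = my_get_parent(parent_id);

            if (IS_ERR(parent))
                    return PTR_ERR(parent);

            down_write(&parent->children_rwsem);
            list_add(child, &parent->children);
            refcount_inc(&parent->users);   /* the child pins its parent */
            up_write(&parent->children_rwsem);
            return 0;
    }
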
/linux/drivers/iommu/iommufd/iommufd_private.h
  319: struct iommufd_viommu *viommu;  (member)
  509: /* An iommufd_vevent represents a vIOMMU event in an iommufd_veventq */
  521:  * An iommufd_veventq object represents an interface to deliver vIOMMU events to
  523:  * a vIOMMU object during the allocations.
  527: struct iommufd_viommu *viommu;  (member)
  586: iommufd_viommu_find_veventq(struct iommufd_viommu *viommu, u32 type)  (argument of iommufd_viommu_find_veventq())
  590: lockdep_assert_held(&viommu->veventqs_rwsem);  (in iommufd_viommu_find_veventq())
  592: list_for_each_entry_safe(veventq, next, &viommu->veventqs, node) {  (in iommufd_viommu_find_veventq())
  607: struct iommufd_viommu *viommu;  (member)
  609: u64 id;  /* per-vIOMMU virtual ID */
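
The header hits show the other half of the locking contract: iommufd_viommu_find_veventq() does not take veventqs_rwsem itself, it requires the caller to hold it and documents that with lockdep_assert_held(). A sketch of a lookup helper written to the same contract, using hypothetical my_viommu/my_veventq types:

    #include <linux/rwsem.h>
    #include <linux/list.h>
    #include <linux/lockdep.h>

    struct my_veventq {
            u32 type;
            struct list_head node;
    };

    struct my_viommu {
            struct rw_semaphore veventqs_rwsem;
            struct list_head veventqs;
    };

    /* Caller must hold veventqs_rwsem (read or write); lockdep checks it
     * instead of this helper taking the lock itself. */
    static struct my_veventq *my_find_veventq(struct my_viommu *viommu, u32 type)
    {
            struct my_veventq *veventq, *next;

            lockdep_assert_held(&viommu->veventqs_rwsem);

            list_for_each_entry_safe(veventq, next, &viommu->veventqs, node)
                    if (veventq->type == type)
                            return veventq;
            return NULL;
    }
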
/linux/drivers/iommu/iommufd/selftest.c
  156: static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)  (argument of to_mock_viommu())
  158: return container_of(viommu, struct mock_viommu, core);  (in to_mock_viommu())
  167: struct mock_viommu *viommu;  (member)
  222: if (new_viommu != mdev->viommu) {  (in mock_domain_nop_attach())
  224: mdev->viommu = new_viommu;  (in mock_domain_nop_attach())
  670: static void mock_viommu_destroy(struct iommufd_viommu *viommu)  (argument of mock_viommu_destroy())
  673: viommu->iommu_dev, struct mock_iommu_device, iommu_dev);  (in mock_viommu_destroy())
  678: /* iommufd core frees mock_viommu and viommu */  (in mock_viommu_destroy())
  682: mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags,  (argument of mock_viommu_alloc_domain_nested())
  685: struct mock_viommu *mock_viommu = to_mock_viommu(viommu);  (in mock_viommu_alloc_domain_nested())
[all …]
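
The selftest wraps the core iommufd_viommu inside its own mock_viommu and recovers the wrapper with container_of(), the standard kernel idiom for downcasting from an embedded member. A sketch of the idiom with hypothetical core_obj/mock_obj types:

    #include <linux/container_of.h>

    struct core_obj {
            int id;
    };

    struct mock_obj {
            struct core_obj core;   /* embedded, like mock_viommu.core */
            int private_state;
    };

    /* Recover the wrapper from a pointer to its embedded core member,
     * the same shape as to_mock_viommu() above. */
    static inline struct mock_obj *to_mock_obj(struct core_obj *core)
    {
            return container_of(core, struct mock_obj, core);
    }
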
/linux/Documentation/devicetree/bindings/virtio/mmio.yaml
   56: iommus = <&viommu 23>;
   59: viommu: iommu@3100 {
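
The devicetree example gives the consumer side of the binding: "iommus = <&viommu 23>" attaches the device to the viommu node with endpoint ID 23, the one extra cell matching the provider's #iommu-cells = <1>. A sketch of how such a specifier can be parsed with the generic OF helper (the real virtio-iommu attachment goes through the fwnode/IOMMU core, not driver code like this):

    #include <linux/of.h>

    /* Resolve the first "iommus" specifier of a node: for the binding
     * above, args.np is the viommu node and args.args[0] == 23. */
    static int get_endpoint_id(struct device_node *np, u32 *endpoint)
    {
            struct of_phandle_args args;
            int ret;

            ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                             0, &args);
            if (ret)
                    return ret;

            *endpoint = args.args[0];
            of_node_put(args.np);
            return 0;
    }
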
/linux/drivers/iommu/amd/iommu.c
 1697:  * When NpCache is on, we infer that we run in a VM and use a vIOMMU.  (in amd_iommu_domain_flush_pages())
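
The lone amd/iommu.c hit is a heuristic rather than a data structure: an IOMMU advertising NpCache caches non-present page table entries, which the driver takes as a hint that the "hardware" is a vIOMMU emulated by a hypervisor, where invalidations are comparatively expensive VM exits, so the flush path splits a range into naturally aligned power-of-two blocks instead of over-invalidating. A sketch of that chunking, with flush_one() as a hypothetical stand-in for the driver's real invalidation primitive:

    #include <linux/bitops.h>
    #include <linux/minmax.h>

    /* Stand-in for the driver's range-invalidation command. */
    static void flush_one(u64 address, size_t size)
    {
            /* e.g. build and queue an INVALIDATE_IOMMU_PAGES command */
    }

    /* Split [address, address + size) into naturally aligned power-of-two
     * blocks: each step flushes the smaller of the current address's
     * alignment and the largest block that still fits in the remainder. */
    static void flush_pages_aligned(u64 address, size_t size)
    {
            while (size) {
                    unsigned int size_align = __fls(size);
                    unsigned int addr_align = address ?
                            __ffs((unsigned long)address) : size_align;
                    size_t flush_size = 1UL << min(addr_align, size_align);

                    flush_one(address, flush_size);
                    address += flush_size;
                    size -= flush_size;
            }
    }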