Lines Matching full:domain
64 struct iommu_domain *domain; member
102 static int __iommu_attach_device(struct iommu_domain *domain,
104 static int __iommu_attach_group(struct iommu_domain *domain,
136 static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
144 static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
179 * Use a function instead of an array here because the domain-type is a
220 pr_info("Default domain type: %s%s\n", in iommu_subsys_init()
226 pr_info("DMA domain TLB invalidation policy: %s mode%s\n", in iommu_subsys_init()
530 * release_device() must stop using any attached domain on the device. in iommu_deinit_device()
535 * that domain is attached prior to calling release_device. Drivers can in iommu_deinit_device()
541 * domain pointer, as it is going to be freed. in iommu_deinit_device()
551 * be parked on a BLOCKED domain during release as that would in iommu_deinit_device()
559 group->domain); in iommu_deinit_device()
578 group->domain = NULL; in iommu_deinit_device()
594 return ((struct iommu_attach_handle *)xa_untag_pointer(entry))->domain; in pasid_array_entry_to_domain()
641 WARN_ON(group->default_domain && !group->domain); in __iommu_probe_device()
644 if (group->domain) { in __iommu_probe_device()
645 ret = __iommu_device_set_domain(group, dev, group->domain, NULL, in __iommu_probe_device()
711 * released, and the current domain must be set back to NULL or in __iommu_group_free_device()
712 * the default domain. in __iommu_group_free_device()
716 group->domain != group->default_domain); in __iommu_group_free_device()
1172 static int iommu_create_device_direct_mappings(struct iommu_domain *domain, in iommu_create_device_direct_mappings() argument
1180 pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0; in iommu_create_device_direct_mappings()
1183 if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size)) in iommu_create_device_direct_mappings()
1198 !iommu_is_dma_domain(domain)) in iommu_create_device_direct_mappings()
1210 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_create_device_direct_mappings()
1218 ret = iommu_map(domain, addr - map_size, in iommu_create_device_direct_mappings()
1672 struct iommu_domain *domain; in __iommu_alloc_identity_domain() local
1678 domain = ops->domain_alloc_identity(dev); in __iommu_alloc_identity_domain()
1679 if (IS_ERR(domain)) in __iommu_alloc_identity_domain()
1680 return domain; in __iommu_alloc_identity_domain()
1685 iommu_domain_init(domain, IOMMU_DOMAIN_IDENTITY, ops); in __iommu_alloc_identity_domain()
1686 return domain; in __iommu_alloc_identity_domain()
1699 * When allocating the DMA API domain assume that the driver is going to in __iommu_group_alloc_default_domain()
1700 * use PASID and make sure the RID's domain is PASID compatible. in __iommu_group_alloc_default_domain()
1708 * try to allocate non-PASID domain in __iommu_group_alloc_default_domain()
1723 * req_type of 0 means "auto" which means to select a domain based on
1735 * Allow legacy drivers to specify the domain that will be the default in iommu_group_alloc_default_domain()
1736 * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM in iommu_group_alloc_default_domain()
1737 * domain. Do not use in new drivers. in iommu_group_alloc_default_domain()
…pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", in iommu_group_alloc_default_domain()
1845 * A target_type of 0 will select the best domain type. 0 can be returned in
1860 * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain. in iommu_get_default_domain_type()
1942 * We go to the trouble of deferred default domain creation so in bus_iommu_probe()
1943 * that the cross-group default domain type and the setup of the in bus_iommu_probe()
2015 * iommu_set_fault_handler() - set a fault handler for an iommu domain
2016 * @domain: iommu domain
2026 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
2030 if (WARN_ON(!domain || domain->cookie_type != IOMMU_COOKIE_NONE)) in iommu_set_fault_handler()
2033 domain->cookie_type = IOMMU_COOKIE_FAULT_HANDLER; in iommu_set_fault_handler()
2034 domain->handler = handler; in iommu_set_fault_handler()
2035 domain->handler_token = token; in iommu_set_fault_handler()
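
Sketch of a consumer registering a handler on an unmanaged domain: the WARN_ON above means this must happen while the domain still has no cookie, i.e. before any DMA cookie setup. Handler and token names here are illustrative, not from the source:

    /* hypothetical handler; token is whatever was passed at registration */
    static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
                                unsigned long iova, int flags, void *token)
    {
            dev_err_ratelimited(dev, "fault at iova %#lx, flags %#x\n", iova, flags);
            return -ENOSYS;         /* non-zero: not handled here */
    }

    iommu_set_fault_handler(domain, my_fault_handler, NULL);
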
2039 static void iommu_domain_init(struct iommu_domain *domain, unsigned int type, in iommu_domain_init() argument
2042 domain->type = type; in iommu_domain_init()
2043 domain->owner = ops; in iommu_domain_init()
2044 if (!domain->ops) in iommu_domain_init()
2045 domain->ops = ops->default_domain_ops; in iommu_domain_init()
2053 struct iommu_domain *domain; in __iommu_paging_domain_alloc_flags() local
2061 domain = ops->domain_alloc_paging(dev); in __iommu_paging_domain_alloc_flags()
2063 domain = ops->domain_alloc_paging_flags(dev, flags, NULL); in __iommu_paging_domain_alloc_flags()
2066 domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); in __iommu_paging_domain_alloc_flags()
2071 if (IS_ERR(domain)) in __iommu_paging_domain_alloc_flags()
2072 return domain; in __iommu_paging_domain_alloc_flags()
2073 if (!domain) in __iommu_paging_domain_alloc_flags()
2076 iommu_domain_init(domain, type, ops); in __iommu_paging_domain_alloc_flags()
2077 return domain; in __iommu_paging_domain_alloc_flags()
2081 * iommu_paging_domain_alloc_flags() - Allocate a paging domain
2082 * @dev: device for which the domain is allocated
2085 * Allocate a paging domain which will be managed by a kernel driver. Return
2086 * allocated domain if successful, or an ERR pointer for failure.
2096 void iommu_domain_free(struct iommu_domain *domain) in iommu_domain_free() argument
2098 switch (domain->cookie_type) { in iommu_domain_free()
2100 iommu_put_dma_cookie(domain); in iommu_domain_free()
2103 iommu_put_msi_cookie(domain); in iommu_domain_free()
2106 mmdrop(domain->mm); in iommu_domain_free()
2111 if (domain->ops->free) in iommu_domain_free()
2112 domain->ops->free(domain); in iommu_domain_free()
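
Minimal lifecycle sketch: the public allocator returns the domain or an ERR_PTR on failure, and iommu_domain_free() releases any DMA/MSI cookie before invoking the driver's free op:

    struct iommu_domain *domain;

    domain = iommu_paging_domain_alloc(dev);        /* the flags == 0 wrapper */
    if (IS_ERR(domain))
            return PTR_ERR(domain);

    /* ... attach the domain and install mappings ... */

    iommu_domain_free(domain);
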
2117 * Put the group's domain back to the appropriate core-owned domain - either the
2118 * standard kernel-mode DMA configuration or an all-DMA-blocked domain.
2132 static int __iommu_attach_device(struct iommu_domain *domain, in __iommu_attach_device() argument
2137 if (unlikely(domain->ops->attach_dev == NULL)) in __iommu_attach_device()
2140 ret = domain->ops->attach_dev(domain, dev, old); in __iommu_attach_device()
2149 * iommu_attach_device - Attach an IOMMU domain to a device
2150 * @domain: IOMMU domain to attach
2156 * that certain configuration of the domain is incompatible with
2157 * the device. In this case attaching a different domain to the
2160 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) in iommu_attach_device() argument
2178 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
2186 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) in iommu_deferred_attach() argument
2189 return __iommu_attach_device(domain, dev, NULL); in iommu_deferred_attach()
2194 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) in iommu_detach_device() argument
2203 if (WARN_ON(domain != group->domain) || in iommu_detach_device()
2221 return group->domain; in iommu_get_domain_for_dev()
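
The attach/detach pair brackets a caller's private use of the device; iommu_attach_device() is intended for singleton groups, and -EINVAL is a soft failure as the comment above notes. A minimal sketch:

    int ret;

    ret = iommu_attach_device(domain, dev);
    if (ret)        /* -EINVAL: this domain just doesn't fit this device */
            return ret;

    WARN_ON(iommu_get_domain_for_dev(dev) != domain);   /* now the live domain */

    /* ... DMA translated by the domain ... */

    iommu_detach_device(domain, dev);
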
2227 * guarantees that the group and its default domain are valid and correct.
2234 static void *iommu_make_pasid_array_entry(struct iommu_domain *domain, in iommu_make_pasid_array_entry() argument
2238 handle->domain = domain; in iommu_make_pasid_array_entry()
2242 return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN); in iommu_make_pasid_array_entry()
2246 struct iommu_domain *domain) in domain_iommu_ops_compatible() argument
2248 if (domain->owner == ops) in domain_iommu_ops_compatible()
2252 if (domain == ops->blocked_domain || domain == ops->identity_domain) in domain_iommu_ops_compatible()
2258 static int __iommu_attach_group(struct iommu_domain *domain, in __iommu_attach_group() argument
2263 if (group->domain && group->domain != group->default_domain && in __iommu_attach_group()
2264 group->domain != group->blocking_domain) in __iommu_attach_group()
2269 !domain_iommu_ops_compatible(dev_iommu_ops(dev), domain)) in __iommu_attach_group()
2272 return __iommu_group_set_domain(group, domain); in __iommu_attach_group()
2276 * iommu_attach_group - Attach an IOMMU domain to an IOMMU group
2277 * @domain: IOMMU domain to attach
2283 * that certain configuration of the domain is incompatible with
2284 * the group. In this case attaching a different domain to the
2287 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
2292 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
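
Group-level equivalent, pairing with iommu_detach_group() below; -EBUSY comes from the check in __iommu_attach_group() when the group is already attached to some non-core domain. Sketch:

    struct iommu_group *group = iommu_group_get(dev);
    int ret;

    if (!group)
            return -ENODEV;
    ret = iommu_attach_group(domain, group);    /* -EBUSY if owned elsewhere */
    if (!ret) {
            /* ... use the domain ... */
            iommu_detach_group(domain, group);
    }
    iommu_group_put(group);
    return ret;
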
2309 * the blocking domain to be attached as it does not contain the in __iommu_device_set_domain()
2331 * If we have a blocking domain then try to attach that in hopes in __iommu_device_set_domain()
2346 * If 0 is returned the group's domain is new_domain. If an error is returned
2347 * then the group's domain will be set back to the existing domain unless
2350 * previously good domain. We try to avoid a kernel UAF because of this.
2371 if (group->domain == new_domain) in __iommu_group_set_domain_internal()
2378 * Changing the domain is done by calling attach_dev() on the new in __iommu_group_set_domain_internal()
2379 * domain. This switch does not have to be atomic and DMA can be in __iommu_group_set_domain_internal()
2381 * either new_domain or group->domain, never something else. in __iommu_group_set_domain_internal()
2386 group->domain, flags); in __iommu_group_set_domain_internal()
2391 * driver fails attach to an otherwise good domain, and in __iommu_group_set_domain_internal()
2393 * drop its reference on the current domain so we don't in __iommu_group_set_domain_internal()
2401 group->domain = new_domain; in __iommu_group_set_domain_internal()
2407 * always allow us to attach to a domain that was already attached. in __iommu_group_set_domain_internal()
2411 /* No need to revert the last gdev that failed to set domain */ in __iommu_group_set_domain_internal()
2415 * A NULL domain can happen only for first probe, in which case in __iommu_group_set_domain_internal()
2416 * we leave group->domain as NULL and let release clean in __iommu_group_set_domain_internal()
2419 if (group->domain) in __iommu_group_set_domain_internal()
2421 group, gdev->dev, group->domain, new_domain, in __iommu_group_set_domain_internal()
2427 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
2435 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
2437 if (domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_iova_to_phys()
2440 if (domain->type == IOMMU_DOMAIN_BLOCKED) in iommu_iova_to_phys()
2443 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
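
Note the special cases: IDENTITY domains echo the IOVA back and BLOCKED domains yield 0, so only paging domains reach the driver op. A trivial, hypothetical helper built on it:

    static bool my_iova_is_mapped(struct iommu_domain *domain, dma_addr_t iova)
    {
            /* 0 means unmapped (or a BLOCKED domain) */
            return iommu_iova_to_phys(domain, iova) != 0;
    }
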
2447 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, in iommu_pgsize() argument
2457 pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0); in iommu_pgsize()
2473 pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); in iommu_pgsize()
2503 int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova, in iommu_map_nosync() argument
2506 const struct iommu_domain_ops *ops = domain->ops; in iommu_map_nosync()
2515 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in iommu_map_nosync()
2518 if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL)) in iommu_map_nosync()
2527 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in iommu_map_nosync()
2545 pgsize = iommu_pgsize(domain, iova, paddr, size, &count); in iommu_map_nosync()
2549 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot, in iommu_map_nosync()
2566 iommu_unmap(domain, orig_iova, orig_size - size); in iommu_map_nosync()
2573 int iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) in iommu_sync_map() argument
2575 const struct iommu_domain_ops *ops = domain->ops; in iommu_sync_map()
2579 return ops->iotlb_sync_map(domain, iova, size); in iommu_sync_map()
2582 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
2587 ret = iommu_map_nosync(domain, iova, paddr, size, prot, gfp); in iommu_map()
2591 ret = iommu_sync_map(domain, iova, size); in iommu_map()
2593 iommu_unmap(domain, iova, size); in iommu_map()
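
So iommu_map() is map_nosync plus sync, with the whole range torn back down if the sync fails: a caller never has to clean up a partial mapping. Usage sketch (iova, paddr and size must be multiples of the domain's minimum page size):

    ret = iommu_map(domain, iova, page_to_phys(page), SZ_2M,
                    IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
    if (ret)
            return ret;     /* nothing left mapped on failure */
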
2599 static size_t __iommu_unmap(struct iommu_domain *domain, in __iommu_unmap() argument
2603 const struct iommu_domain_ops *ops = domain->ops; in __iommu_unmap()
2608 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) in __iommu_unmap()
2611 if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL)) in __iommu_unmap()
2615 min_pagesz = 1 << __ffs(domain->pgsize_bitmap); in __iommu_unmap()
2637 pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count); in __iommu_unmap()
2638 unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather); in __iommu_unmap()
2655 * @domain: Domain to manipulate
2667 size_t iommu_unmap(struct iommu_domain *domain, in iommu_unmap() argument
2674 ret = __iommu_unmap(domain, iova, size, &iotlb_gather); in iommu_unmap()
2675 iommu_iotlb_sync(domain, &iotlb_gather); in iommu_unmap()
2683 * @domain: Domain to manipulate
2700 size_t iommu_unmap_fast(struct iommu_domain *domain, in iommu_unmap_fast() argument
2704 return __iommu_unmap(domain, iova, size, iotlb_gather); in iommu_unmap_fast()
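
iommu_unmap_fast() lets several unmaps share one IOTLB flush via a caller-owned gather, which is what iommu_unmap() does internally for a single range. Sketch (the two ranges are illustrative):

    struct iommu_iotlb_gather gather;
    size_t unmapped;

    iommu_iotlb_gather_init(&gather);
    unmapped  = iommu_unmap_fast(domain, iova_a, size_a, &gather);
    unmapped += iommu_unmap_fast(domain, iova_b, size_b, &gather);
    iommu_iotlb_sync(domain, &gather);      /* one flush for both ranges */
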
2708 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, in iommu_map_sg() argument
2721 ret = iommu_map_nosync(domain, iova + mapped, start, in iommu_map_sg()
2745 ret = iommu_sync_map(domain, iova, mapped); in iommu_map_sg()
2753 iommu_unmap(domain, iova, mapped); in iommu_map_sg()
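
iommu_map_sg() coalesces physically contiguous scatterlist entries through iommu_map_nosync() and issues a single iommu_sync_map() at the end, unwinding everything on error. Sketch (sgt is an assumed struct sg_table):

    ssize_t mapped;

    mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
                          IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
    if (mapped < 0)
            return mapped;  /* partial progress already rolled back */
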
2761 * @domain: the iommu domain where the fault has happened
2783 int report_iommu_fault(struct iommu_domain *domain, struct device *dev, in report_iommu_fault() argument
2792 if (domain->cookie_type == IOMMU_COOKIE_FAULT_HANDLER && in report_iommu_fault()
2793 domain->handler) in report_iommu_fault()
2794 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
2795 domain->handler_token); in report_iommu_fault()
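
report_iommu_fault() is the IOMMU driver's side of the contract: called from the fault IRQ path, it invokes the handler installed by iommu_set_fault_handler() above, if any. Driver-side sketch with hypothetical register and struct names:

    static irqreturn_t my_iommu_irq(int irq, void *data)
    {
            struct my_iommu *im = data;     /* hypothetical driver state */
            unsigned long iova = readl_relaxed(im->base + MY_FAULT_ADDR);

            if (!report_iommu_fault(im->domain, im->dev, iova, IOMMU_FAULT_READ))
                    return IRQ_HANDLED;     /* consumer's handler resolved it */

            dev_err_ratelimited(im->dev, "unhandled fault at %#lx\n", iova);
            return IRQ_NONE;
    }
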
2814 int iommu_set_pgtable_quirks(struct iommu_domain *domain, in iommu_set_pgtable_quirks() argument
2817 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_set_pgtable_quirks()
2819 if (!domain->ops->set_pgtable_quirks) in iommu_set_pgtable_quirks()
2821 return domain->ops->set_pgtable_quirks(domain, quirk); in iommu_set_pgtable_quirks()
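
Only UNMANAGED domains whose driver implements the hook accept quirks; e.g. the msm GPU driver requests outer write-back/write-allocate pagetable walks this way (quirk constant from io-pgtable.h), typically before attaching the domain:

    if (iommu_set_pgtable_quirks(domain, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
            dev_warn(dev, "WBWA pagetable walks not supported\n");
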
2831 * A domain user should not map IOVA in these ranges.
2991 * @target_type: Domain type to set as the default_domain
2993 * Allocate a default domain and set it as the current domain on the group. If
2994 * the group already has a default domain it will be changed to the target_type.
2995 * When target_type is 0 the default domain is selected based on driver and
3046 if (!group->domain) { in iommu_setup_default_domain()
3048 * Drivers are not allowed to fail the first domain attach. in iommu_setup_default_domain()
3050 * iommu driver and call ops->release_device. Put the domain in iommu_setup_default_domain()
3064 * Drivers are supposed to allow mappings to be installed in a domain in iommu_setup_default_domain()
3095 * Changing the default domain through sysfs requires the users to unbind the
3127 /* We can bring up a flush queue without tearing down the domain. */ in iommu_group_store_type()
3182 if (group->domain != group->default_domain || group->owner || in iommu_device_use_default_domain()
3223 struct iommu_domain *domain; in __iommu_group_alloc_blocking_domain() local
3235 * empty PAGING domain instead. in __iommu_group_alloc_blocking_domain()
3237 domain = iommu_paging_domain_alloc(dev); in __iommu_group_alloc_blocking_domain()
3238 if (IS_ERR(domain)) in __iommu_group_alloc_blocking_domain()
3239 return PTR_ERR(domain); in __iommu_group_alloc_blocking_domain()
3240 group->blocking_domain = domain; in __iommu_group_alloc_blocking_domain()
3248 if ((group->domain && group->domain != group->default_domain) || in __iommu_take_dma_ownership()
3397 struct iommu_domain *domain) in iommu_remove_dev_pasid() argument
3403 dev, pasid, domain)); in iommu_remove_dev_pasid()
3406 static int __iommu_set_group_pasid(struct iommu_domain *domain, in __iommu_set_group_pasid() argument
3415 ret = domain->ops->set_dev_pasid(domain, device->dev, in __iommu_set_group_pasid()
3431 * If no old domain, undo the succeeded devices/pasid. in __iommu_set_group_pasid()
3433 * the old domain. And it is a driver bug to fail in __iommu_set_group_pasid()
3434 * attaching with a previously good domain. in __iommu_set_group_pasid()
3438 pasid, domain))) in __iommu_set_group_pasid()
3439 iommu_remove_dev_pasid(device->dev, pasid, domain); in __iommu_set_group_pasid()
3447 struct iommu_domain *domain) in __iommu_remove_group_pasid() argument
3453 iommu_remove_dev_pasid(device->dev, pasid, domain); in __iommu_remove_group_pasid()
3458 * iommu_attach_device_pasid() - Attach a domain to pasid of device
3459 * @domain: the iommu domain.
3469 int iommu_attach_device_pasid(struct iommu_domain *domain, in iommu_attach_device_pasid() argument
3485 if (!domain->ops->set_dev_pasid || in iommu_attach_device_pasid()
3490 if (!domain_iommu_ops_compatible(ops, domain) || in iommu_attach_device_pasid()
3508 entry = iommu_make_pasid_array_entry(domain, handle); in iommu_attach_device_pasid()
3518 ret = __iommu_set_group_pasid(domain, group, pasid, NULL); in iommu_attach_device_pasid()
3526 * held, this cannot fail. The new domain cannot be visible until the in iommu_attach_device_pasid()
3540 * iommu_replace_device_pasid - Replace the domain that a specific pasid
3542 * @domain: the new iommu domain
3556 int iommu_replace_device_pasid(struct iommu_domain *domain, in iommu_replace_device_pasid() argument
3570 if (!domain->ops->set_dev_pasid) in iommu_replace_device_pasid()
3573 if (!domain_iommu_ops_compatible(dev_iommu_ops(dev), domain) || in iommu_replace_device_pasid()
3578 entry = iommu_make_pasid_array_entry(domain, handle); in iommu_replace_device_pasid()
3587 * No domain (with or without handle) attached, hence not in iommu_replace_device_pasid()
3610 if (curr_domain != domain) { in iommu_replace_device_pasid()
3611 ret = __iommu_set_group_pasid(domain, group, in iommu_replace_device_pasid()
3631 * iommu_detach_device_pasid() - Detach the domain from pasid of device
3632 * @domain: the iommu domain.
3636 * The @domain must have been attached to @pasid of the @dev with
3639 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, in iommu_detach_device_pasid() argument
3646 __iommu_remove_group_pasid(group, pasid, domain); in iommu_detach_device_pasid()
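
The PASID variants bind a domain to a single address-space ID rather than the device's RID. Minimal sketch; the attach-handle argument is optional (NULL here), in which case only the tagged domain pointer is stored in the pasid array:

    ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
    if (ret)
            return ret;

    /* ... DMA tagged with this PASID now translates through the domain ... */

    iommu_detach_device_pasid(domain, dev, pasid);
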
3681 * @group: the iommu group that domain was attached to
3683 * @type: matched domain type, 0 for any match
3688 * handle is from the time when the domain is attached to the time when the
3689 * domain is detached. Callers are required to synchronize the call of
3690 * iommu_attach_handle_get() with domain attachment and detachment. The attach
3705 if (type && handle->domain->type != type) in iommu_attach_handle_get()
3715 * iommu_attach_group_handle - Attach an IOMMU domain to an IOMMU group
3716 * @domain: IOMMU domain to attach
3723 * an attach handle and use it when the domain is attached. This is currently
3729 int iommu_attach_group_handle(struct iommu_domain *domain, in iommu_attach_group_handle() argument
3740 entry = iommu_make_pasid_array_entry(domain, handle); in iommu_attach_group_handle()
3746 ret = __iommu_attach_group(domain, group); in iommu_attach_group_handle()
3754 * held, this cannot fail. The new domain cannot be visible until the in iommu_attach_group_handle()
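
The handle-based attach used by iommufd publishes a caller-provided iommu_attach_handle under IOMMU_NO_PASID, retrievable later via iommu_attach_handle_get(). Sketch of the usual embedding pattern (struct and field names assumed):

    struct my_attachment {
            struct iommu_attach_handle handle;  /* recovered via container_of() */
            /* caller-private state ... */
    };

    ret = iommu_attach_group_handle(domain, group, &at->handle);
    /* later:
     * h = iommu_attach_handle_get(group, IOMMU_NO_PASID, 0);
     * at = container_of(h, struct my_attachment, handle);
     */
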
3768 * iommu_detach_group_handle - Detach an IOMMU domain from an IOMMU group
3769 * @domain: IOMMU domain to attach
3772 * Detach the specified IOMMU domain from the specified IOMMU group.
3775 void iommu_detach_group_handle(struct iommu_domain *domain, in iommu_detach_group_handle() argument
3786 * iommu_replace_group_handle - replace the domain that a group is attached to
3787 * @group: IOMMU group that will be attached to the new domain
3788 * @new_domain: new IOMMU domain to replace with
3792 * the blocking domain in-between. It allows the caller to provide an attach
3793 * handle for the new domain and use it when the domain is attached.
3795 * If the currently attached domain is a core domain (e.g. a default_domain),
3837 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
3842 * an IOVA in the domain and call msi_desc_set_iommu_msi_iova() with the
3857 /* An IDENTITY domain must pass through */ in iommu_dma_prepare_msi()
3858 if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) { in iommu_dma_prepare_msi()
3859 switch (group->domain->cookie_type) { in iommu_dma_prepare_msi()
3862 ret = iommu_dma_sw_msi(group->domain, desc, msi_addr); in iommu_dma_prepare_msi()
3865 ret = iommufd_sw_msi(group->domain, desc, msi_addr); in iommu_dma_prepare_msi()