Lines Matching full:domain (drivers/iommu/intel/cache.c)
42 /* Assign a cache tag with specified type to domain. */
43 int cache_tag_assign(struct dmar_domain *domain, u16 did, struct device *dev, in cache_tag_assign() argument
67 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_assign()
68 prev = &domain->cache_tags; in cache_tag_assign()
69 list_for_each_entry(temp, &domain->cache_tags, node) { in cache_tag_assign()
72 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_assign()
86 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_assign()
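From the fragments at lines 67-86 the assign path can be reconstructed: a tag is allocated up front, then the list is walked under domain->cache_lock to either reuse an existing matching tag or link the new one in. A minimal sketch under those assumptions; the cache_tag_match() predicate, the users refcount, and the list_add() placement are inferred, not shown in the listing:

/*
 * Sketch of cache_tag_assign(); only the locking and the list walk are
 * taken from the listing, the rest is assumption.
 */
static int cache_tag_assign(struct dmar_domain *domain, u16 did,
			    struct device *dev, ioasid_t pasid,
			    enum cache_tag_type type)
{
	struct cache_tag *tag, *temp;
	struct list_head *prev;
	unsigned long flags;

	tag = kzalloc(sizeof(*tag), GFP_KERNEL);
	if (!tag)
		return -ENOMEM;
	/* ... fill in tag->type, tag->iommu, tag->domain_id, tag->pasid ... */

	spin_lock_irqsave(&domain->cache_lock, flags);		/* line 67 */
	prev = &domain->cache_tags;				/* line 68 */
	list_for_each_entry(temp, &domain->cache_tags, node) {	/* line 69 */
		if (cache_tag_match(temp, did, dev, pasid, type)) {
			temp->users++;	/* reuse the existing tag */
			spin_unlock_irqrestore(&domain->cache_lock, flags); /* line 72 */
			kfree(tag);
			return 0;
		}
		/* assumed: advance prev so tags stay grouped per IOMMU */
	}
	list_add(&tag->node, prev);
	spin_unlock_irqrestore(&domain->cache_lock, flags);	/* line 86 */
	return 0;
}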
92 /* Unassign a cache tag with specified type from domain. */
93 static void cache_tag_unassign(struct dmar_domain *domain, u16 did, in cache_tag_unassign() argument
102 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_unassign()
103 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_unassign()
113 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_unassign()
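The unassign side (lines 102-113) is the mirror image: find the matching tag under the same lock and drop it. The refcount handling is an assumption carried over from the assign sketch above:

static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
			       struct device *dev, ioasid_t pasid,
			       enum cache_tag_type type)
{
	struct cache_tag *tag;
	unsigned long flags;

	spin_lock_irqsave(&domain->cache_lock, flags);		/* line 102 */
	list_for_each_entry(tag, &domain->cache_tags, node) {	/* line 103 */
		if (!cache_tag_match(tag, did, dev, pasid, type)) /* assumed */
			continue;
		if (--tag->users == 0) {	/* assumed: free on last user */
			list_del(&tag->node);
			kfree(tag);
		}
		break;
	}
	spin_unlock_irqrestore(&domain->cache_lock, flags);	/* line 113 */
}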
116 /* domain->qi_batch will be freed in iommu_free_domain() path. */
117 static int domain_qi_batch_alloc(struct dmar_domain *domain) in domain_qi_batch_alloc() argument
122 spin_lock_irqsave(&domain->cache_lock, flags); in domain_qi_batch_alloc()
123 if (domain->qi_batch) in domain_qi_batch_alloc()
126 domain->qi_batch = kzalloc_obj(*domain->qi_batch, GFP_ATOMIC); in domain_qi_batch_alloc()
127 if (!domain->qi_batch) in domain_qi_batch_alloc()
130 spin_unlock_irqrestore(&domain->cache_lock, flags); in domain_qi_batch_alloc()
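Lines 122-130 show a lazy, idempotent allocation of the per-domain invalidation batch: both the check and the allocation happen under cache_lock, which is why GFP_ATOMIC is used. The early-exit label and the -ENOMEM return are assumptions:

static int domain_qi_batch_alloc(struct dmar_domain *domain)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&domain->cache_lock, flags);	/* line 122 */
	if (domain->qi_batch)	/* line 123: already allocated, nothing to do */
		goto out_unlock;

	/* GFP_ATOMIC because a spinlock is held; the allocation must not sleep. */
	domain->qi_batch = kzalloc_obj(*domain->qi_batch, GFP_ATOMIC); /* line 126 */
	if (!domain->qi_batch)				/* line 127 */
		ret = -ENOMEM;
out_unlock:
	spin_unlock_irqrestore(&domain->cache_lock, flags); /* line 130 */
	return ret;
}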
135 static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did, in __cache_tag_assign_domain() argument
141 ret = domain_qi_batch_alloc(domain); in __cache_tag_assign_domain()
145 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_assign_domain()
149 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB); in __cache_tag_assign_domain()
151 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_assign_domain()
156 static void __cache_tag_unassign_domain(struct dmar_domain *domain, u16 did, in __cache_tag_unassign_domain() argument
161 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_unassign_domain()
164 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_DEVTLB); in __cache_tag_unassign_domain()
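Lines 141-151 give the pairing logic: an IOTLB tag is always assigned, a device-TLB tag only conditionally (the ats_enabled gate below is an assumption), and a failed second assignment rolls back the first so no stale tag survives:

	ret = domain_qi_batch_alloc(domain);			/* line 141 */
	if (ret)
		return ret;

	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB); /* line 145 */
	if (ret || !info->ats_enabled)	/* assumed: devTLB tag only with ATS on */
		return ret;

	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB); /* line 149 */
	if (ret)	/* roll back: leave no IOTLB tag without its devTLB peer */
		cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB); /* line 151 */
	return ret;

__cache_tag_unassign_domain() simply drops both tags in the same order (lines 161 and 164).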
167 static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did, in __cache_tag_assign_parent_domain() argument
173 ret = domain_qi_batch_alloc(domain); in __cache_tag_assign_parent_domain()
177 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_assign_parent_domain()
181 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB); in __cache_tag_assign_parent_domain()
183 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_assign_parent_domain()
188 static void __cache_tag_unassign_parent_domain(struct dmar_domain *domain, u16 did, in __cache_tag_unassign_parent_domain() argument
193 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_unassign_parent_domain()
196 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB); in __cache_tag_unassign_parent_domain()
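The parent-domain variants (lines 167-196) repeat this pattern one-for-one, but with the CACHE_TAG_NESTING_IOTLB and CACHE_TAG_NESTING_DEVTLB types. They tag the second-stage parent of a nested domain so that a later change in the parent's mappings can be pushed to caches populated through nested translation.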
199 static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev) in domain_get_id_for_dev() argument
205 * The driver assigns different domain IDs for all domains except in domain_get_id_for_dev()
208 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_get_id_for_dev()
211 return domain_id_iommu(domain, iommu); in domain_get_id_for_dev()
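Lines 205-211 explain the domain-ID choice: SVA domains are the one case that shares a fixed ID, everything else gets a per-IOMMU ID. A sketch, where the FLPT_DEFAULT_DID constant for the SVA case is an assumption:

	if (domain->domain.type == IOMMU_DOMAIN_SVA)	/* line 208 */
		return FLPT_DEFAULT_DID;	/* assumed shared SVA domain ID */

	return domain_id_iommu(domain, iommu);		/* line 211 */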
215 * Assign cache tags to a domain when it's associated with a device's
216 * PASID using a specific domain ID.
219 * domain's cache tag list. On failure (negative return value), an error
222 int cache_tag_assign_domain(struct dmar_domain *domain, in cache_tag_assign_domain() argument
225 u16 did = domain_get_id_for_dev(domain, dev); in cache_tag_assign_domain()
228 ret = __cache_tag_assign_domain(domain, did, dev, pasid); in cache_tag_assign_domain()
229 if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED) in cache_tag_assign_domain()
232 ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid); in cache_tag_assign_domain()
234 __cache_tag_unassign_domain(domain, did, dev, pasid); in cache_tag_assign_domain()
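Lines 225-234 are nearly complete, so the exported entry point can be reconstructed with little guesswork: assign tags for the domain itself, and for a nested domain also tag its second-stage parent, undoing the first step if the second fails:

int cache_tag_assign_domain(struct dmar_domain *domain,
			    struct device *dev, ioasid_t pasid)
{
	u16 did = domain_get_id_for_dev(domain, dev);	/* line 225 */
	int ret;

	ret = __cache_tag_assign_domain(domain, did, dev, pasid); /* line 228 */
	if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)	/* line 229 */
		return ret;

	ret = __cache_tag_assign_parent_domain(domain->s2_domain, did,
					       dev, pasid);	/* line 232 */
	if (ret)
		__cache_tag_unassign_domain(domain, did, dev, pasid); /* line 234 */
	return ret;
}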
240 * Remove the cache tags associated with a device's PASID when the domain is
243 * The cache tags must be previously assigned to the domain by calling the
246 void cache_tag_unassign_domain(struct dmar_domain *domain, in cache_tag_unassign_domain() argument
249 u16 did = domain_get_id_for_dev(domain, dev); in cache_tag_unassign_domain()
251 __cache_tag_unassign_domain(domain, did, dev, pasid); in cache_tag_unassign_domain()
252 if (domain->domain.type == IOMMU_DOMAIN_NESTED) in cache_tag_unassign_domain()
253 __cache_tag_unassign_parent_domain(domain->s2_domain, did, dev, pasid); in cache_tag_unassign_domain()
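How a caller would pair the two entry points, for illustration only; the surrounding attach/detach context is assumed, not taken from the listing:

	/* attach path */
	ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
	if (ret)
		return ret;
	/* ... program the context or PASID table entry ... */

	/* detach path: must mirror the assignment exactly */
	cache_tag_unassign_domain(domain, dev, IOMMU_NO_PASID);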
366 static bool intel_domain_use_piotlb(struct dmar_domain *domain) in intel_domain_use_piotlb() argument
368 return domain->domain.type == IOMMU_DOMAIN_SVA || in intel_domain_use_piotlb()
369 domain->domain.type == IOMMU_DOMAIN_NESTED || in intel_domain_use_piotlb()
370 intel_domain_is_fs_paging(domain); in intel_domain_use_piotlb()
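This predicate decides the invalidation flavor: SVA, nested, and first-stage paging domains have their IOTLB entries tagged by PASID, so they must be invalidated with PASID-based descriptors; all other domains fall back to classic domain-ID based IOTLB invalidation, as the next function shows.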
373 static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag, in cache_tag_flush_iotlb() argument
380 if (intel_domain_use_piotlb(domain)) { in cache_tag_flush_iotlb()
382 pages, ih, domain->qi_batch); in cache_tag_flush_iotlb()
387 * Fallback to domain selective flush if no PSI support or the size in cache_tag_flush_iotlb()
400 domain->qi_batch); in cache_tag_flush_iotlb()
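A sketch of cache_tag_flush_iotlb() built around lines 380-400. The PASID path is taken almost verbatim from the fragments; the fallback condition and the qi_batch_add_iotlb() argument order are assumptions, guided by the comment at line 387:

	u64 type = DMA_TLB_PSI_FLUSH;

	if (intel_domain_use_piotlb(domain)) {		/* line 380 */
		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
				    pages, ih, domain->qi_batch); /* line 382 */
		return;
	}

	/*
	 * Fall back to domain-selective invalidation if page-selective
	 * invalidation is unsupported or the range is too large (line 387).
	 */
	if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) {
		addr = 0;
		mask = 0;
		ih = 0;
		type = DMA_TLB_DSI_FLUSH;
	}

	qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
			   domain->qi_batch);		/* line 400 */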
405 static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_tag *tag, in cache_tag_flush_devtlb_psi() argument
417 addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
420 addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
425 info->ats_qdep, addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
429 domain->qi_batch); in cache_tag_flush_devtlb_psi()
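The device-TLB side (lines 417-429) issues ATS invalidations through the same batch. Two fragments end identically at lines 417 and 420, which suggests a repeated flush for devices that need an extra invalidation; that quirk flag and the exact qi_batch_add_*() signatures are assumptions:

	struct device_domain_info *info = dev_iommu_priv_get(tag->dev);
	u16 sid = PCI_DEVID(info->bus, info->devfn);

	if (tag->pasid == IOMMU_NO_PASID) {
		qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
				       addr, mask, domain->qi_batch); /* line 417 */
		if (info->dtlb_extra_inval)	/* assumed quirk flag */
			qi_batch_add_dev_iotlb(iommu, sid, info->pfsid,
					       info->ats_qdep, addr, mask,
					       domain->qi_batch); /* line 420 */
		return;
	}

	qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
				     info->ats_qdep, addr, mask,
				     domain->qi_batch);	/* lines 425, 429 */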
434 * when the memory mappings in the target domain have been modified.
436 void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, in cache_tag_flush_range() argument
452 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_flush_range()
453 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_flush_range()
455 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
461 cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih); in cache_tag_flush_range()
475 cache_tag_flush_devtlb_psi(domain, tag, addr, mask); in cache_tag_flush_range()
481 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
482 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_flush_range()
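Lines 452-482 outline the main flush loop. Batches are per IOMMU, so the accumulated descriptors are submitted whenever the walk crosses to a tag on a different IOMMU, and once more after the loop. The address alignment helper and the switch over tag types are assumptions consistent with the calls at lines 461 and 475:

	addr = calculate_psi_aligned_address(start, end, &pages, &mask); /* assumed */

	spin_lock_irqsave(&domain->cache_lock, flags);		/* line 452 */
	list_for_each_entry(tag, &domain->cache_tags, node) {	/* line 453 */
		if (iommu && iommu != tag->iommu)	/* IOMMU boundary: submit batch */
			qi_batch_flush_descs(iommu, domain->qi_batch); /* line 455 */
		iommu = tag->iommu;

		switch (tag->type) {
		case CACHE_TAG_IOTLB:
		case CACHE_TAG_NESTING_IOTLB:
			cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih); /* line 461 */
			break;
		case CACHE_TAG_DEVTLB:
		case CACHE_TAG_NESTING_DEVTLB:
			cache_tag_flush_devtlb_psi(domain, tag, addr, mask); /* line 475 */
			break;
		}
	}
	qi_batch_flush_descs(iommu, domain->qi_batch);		/* line 481 */
	spin_unlock_irqrestore(&domain->cache_lock, flags);	/* line 482 */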
487 * domain have been modified.
489 void cache_tag_flush_all(struct dmar_domain *domain) in cache_tag_flush_all() argument
491 cache_tag_flush_range(domain, 0, ULONG_MAX, 0); in cache_tag_flush_all()
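Flushing the whole domain is just the range flush applied to [0, ULONG_MAX] with ih cleared, so there is a single invalidation walk to maintain rather than a second full-flush code path.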
496 * domain.
505 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, in cache_tag_flush_range_np() argument
515 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_flush_range_np()
516 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_flush_range_np()
518 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
522 intel_domain_is_fs_paging(domain)) { in cache_tag_flush_range_np()
529 cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0); in cache_tag_flush_range_np()
533 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
534 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_flush_range_np()
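The _np variant (lines 515-534) runs when new mappings are created rather than removed. The shape of the condition at line 522 suggests the usual VT-d caching-mode logic; the cap_caching_mode() test and the write-buffer flush are assumptions around the shown intel_domain_is_fs_paging() check:

	spin_lock_irqsave(&domain->cache_lock, flags);		/* line 515 */
	list_for_each_entry(tag, &domain->cache_tags, node) {	/* line 516 */
		if (iommu && iommu != tag->iommu)
			qi_batch_flush_descs(iommu, domain->qi_batch); /* line 518 */
		iommu = tag->iommu;

		/*
		 * Assumed: without caching mode, or with first-stage paging,
		 * hardware does not cache not-present entries, so flushing
		 * the write buffer suffices for newly created mappings.
		 */
		if (!cap_caching_mode(iommu->cap) ||
		    intel_domain_is_fs_paging(domain)) {	/* line 522 */
			iommu_flush_write_buffer(iommu);
			continue;
		}

		cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0); /* line 529 */
	}
	qi_batch_flush_descs(iommu, domain->qi_batch);		/* line 533 */
	spin_unlock_irqrestore(&domain->cache_lock, flags);	/* line 534 */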