Lines Matching full:domain

42 /* Assign a cache tag of the specified type to the domain. */
43 static int cache_tag_assign(struct dmar_domain *domain, u16 did, in cache_tag_assign() argument
68 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_assign()
69 prev = &domain->cache_tags; in cache_tag_assign()
70 list_for_each_entry(temp, &domain->cache_tags, node) { in cache_tag_assign()
73 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_assign()
87 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_assign()
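
The lines above are from cache_tag_assign(), which allocates a struct cache_tag and links it into domain->cache_tags under domain->cache_lock. The prev pointer at line 69 suggests the real code keeps tags belonging to the same IOMMU adjacent in the list (which the flush loops later exploit); the sketch below simplifies that to a tail insert and treats the cache_tag field names and the users refcount as assumptions inferred from the surrounding matches, not a verbatim copy of the driver.

/*
 * Minimal sketch of the assign pattern: allocate outside the lock with
 * GFP_KERNEL, then either take a reference on an equivalent existing tag
 * or link the new one in.
 */
static int cache_tag_assign(struct dmar_domain *domain, u16 did,
			    struct device *dev, ioasid_t pasid,
			    enum cache_tag_type type)
{
	struct cache_tag *tag, *temp;
	unsigned long flags;

	tag = kzalloc(sizeof(*tag), GFP_KERNEL);
	if (!tag)
		return -ENOMEM;

	tag->type = type;
	tag->domain_id = did;
	tag->dev = dev;
	tag->pasid = pasid;
	tag->users = 1;

	spin_lock_irqsave(&domain->cache_lock, flags);
	list_for_each_entry(temp, &domain->cache_tags, node) {
		if (temp->type == type && temp->dev == dev &&
		    temp->domain_id == did && temp->pasid == pasid) {
			/* Equivalent tag already exists: share it. */
			temp->users++;
			spin_unlock_irqrestore(&domain->cache_lock, flags);
			kfree(tag);
			return 0;
		}
	}
	list_add_tail(&tag->node, &domain->cache_tags);
	spin_unlock_irqrestore(&domain->cache_lock, flags);

	return 0;
}
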
93 /* Unassign a cache tag of the specified type from the domain. */
94 static void cache_tag_unassign(struct dmar_domain *domain, u16 did, in cache_tag_unassign() argument
103 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_unassign()
104 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_unassign()
114 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_unassign()
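
cache_tag_unassign() is the inverse: walk the list under the same lock, drop a reference on the matching tag, and free it once the last user is gone. A hedged sketch under the same field-name assumptions:

static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
			       struct device *dev, ioasid_t pasid,
			       enum cache_tag_type type)
{
	struct cache_tag *tag;
	unsigned long flags;

	spin_lock_irqsave(&domain->cache_lock, flags);
	list_for_each_entry(tag, &domain->cache_tags, node) {
		if (tag->type == type && tag->dev == dev &&
		    tag->domain_id == did && tag->pasid == pasid) {
			/* Free only when the last user drops the tag. */
			if (--tag->users == 0) {
				list_del(&tag->node);
				kfree(tag);
			}
			break;
		}
	}
	spin_unlock_irqrestore(&domain->cache_lock, flags);
}
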
117 /* domain->qi_batch will be freed in the iommu_free_domain() path. */
118 static int domain_qi_batch_alloc(struct dmar_domain *domain) in domain_qi_batch_alloc() argument
123 spin_lock_irqsave(&domain->cache_lock, flags); in domain_qi_batch_alloc()
124 if (domain->qi_batch) in domain_qi_batch_alloc()
127 domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC); in domain_qi_batch_alloc()
128 if (!domain->qi_batch) in domain_qi_batch_alloc()
131 spin_unlock_irqrestore(&domain->cache_lock, flags); in domain_qi_batch_alloc()
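
domain_qi_batch_alloc() lazily allocates the per-domain batch buffer for queued-invalidation descriptors. The matched lines reconstruct it almost completely; only the early-exit label below is assumed. GFP_ATOMIC is required because the check and the allocation both run under the cache_lock spinlock, where sleeping is not allowed:

static int domain_qi_batch_alloc(struct dmar_domain *domain)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&domain->cache_lock, flags);
	/* Another caller may have raced in and allocated it already. */
	if (domain->qi_batch)
		goto out_unlock;

	domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC);
	if (!domain->qi_batch)
		ret = -ENOMEM;

out_unlock:
	spin_unlock_irqrestore(&domain->cache_lock, flags);
	return ret;
}
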
136 static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did, in __cache_tag_assign_domain() argument
142 ret = domain_qi_batch_alloc(domain); in __cache_tag_assign_domain()
146 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_assign_domain()
150 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB); in __cache_tag_assign_domain()
152 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_assign_domain()
157 static void __cache_tag_unassign_domain(struct dmar_domain *domain, u16 did, in __cache_tag_unassign_domain() argument
162 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB); in __cache_tag_unassign_domain()
165 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_DEVTLB); in __cache_tag_unassign_domain()
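
__cache_tag_assign_domain() and __cache_tag_unassign_domain() manage the IOTLB tag plus an optional device-TLB tag as a pair, rolling back the first assignment when the second fails. A sketch; the info->ats_enabled gate is an assumption about how the elided lines between 146 and 150 decide whether a DEVTLB tag is needed at all:

static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
				     struct device *dev, ioasid_t pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	int ret;

	ret = domain_qi_batch_alloc(domain);
	if (ret)
		return ret;

	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
	if (ret || !info->ats_enabled)	/* no ATS: no device TLB to tag */
		return ret;

	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
	if (ret)	/* unwind so nothing is left half-assigned */
		cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);

	return ret;
}

The unassign counterpart at lines 157-165 simply removes both tag types, with the DEVTLB removal presumably behind the same ATS check.
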
168 static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did, in __cache_tag_assign_parent_domain() argument
174 ret = domain_qi_batch_alloc(domain); in __cache_tag_assign_parent_domain()
178 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_assign_parent_domain()
182 ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB); in __cache_tag_assign_parent_domain()
184 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_assign_parent_domain()
189 static void __cache_tag_unassign_parent_domain(struct dmar_domain *domain, u16 did, in __cache_tag_unassign_parent_domain() argument
194 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB); in __cache_tag_unassign_parent_domain()
197 cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB); in __cache_tag_unassign_parent_domain()
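
The parent-domain variants repeat the pattern with the CACHE_TAG_NESTING_* types, so that when a nested (stage-1) domain is attached, its stage-2 parent also accumulates tags and gets invalidated when its mappings change. The four tag types plausibly form an enum like the following sketch; the exact definition lives in the driver's header, and the comments are my reading, not the source's:

enum cache_tag_type {
	CACHE_TAG_IOTLB,		/* IOMMU TLB, directly attached domain */
	CACHE_TAG_DEVTLB,		/* device TLB (ATS), directly attached domain */
	CACHE_TAG_NESTING_IOTLB,	/* IOMMU TLB, tracked on a nested domain's parent */
	CACHE_TAG_NESTING_DEVTLB,	/* device TLB, tracked on a nested domain's parent */
};
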
200 static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev) in domain_get_id_for_dev() argument
206 * The driver assigns different domain IDs for all domains except the SVA type. in domain_get_id_for_dev()
209 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_get_id_for_dev()
212 return domain_id_iommu(domain, iommu); in domain_get_id_for_dev()
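
domain_get_id_for_dev() resolves which domain ID the invalidation commands should carry on a given device's IOMMU. Every domain type gets its own per-IOMMU ID except SVA domains, which share the CPU page table. A sketch; FLPT_DEFAULT_DID is my assumption for what the SVA branch around lines 209-210 returns:

static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;

	/* SVA domains use a fixed first-level default DID (assumed name). */
	if (domain->domain.type == IOMMU_DOMAIN_SVA)
		return FLPT_DEFAULT_DID;

	return domain_id_iommu(domain, iommu);
}
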
216 * Assign cache tags to a domain when it's associated with a device's
217 * PASID using a specific domain ID.
220 * On success (return value of 0), cache tags are created and added to the domain's cache tag list. On failure (negative return value), an error message is printed and returned.
223 int cache_tag_assign_domain(struct dmar_domain *domain, in cache_tag_assign_domain() argument
226 u16 did = domain_get_id_for_dev(domain, dev); in cache_tag_assign_domain()
229 ret = __cache_tag_assign_domain(domain, did, dev, pasid); in cache_tag_assign_domain()
230 if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED) in cache_tag_assign_domain()
233 ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid); in cache_tag_assign_domain()
235 __cache_tag_unassign_domain(domain, did, dev, pasid); in cache_tag_assign_domain()
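
The public entry point composes the helpers above: assign tags for the domain itself and, when the domain is nested, also assign nesting tags on its stage-2 parent, unwinding on failure. This reconstructs nearly line-for-line from the matches; only the function tail is assumed:

int cache_tag_assign_domain(struct dmar_domain *domain,
			    struct device *dev, ioasid_t pasid)
{
	u16 did = domain_get_id_for_dev(domain, dev);
	int ret;

	ret = __cache_tag_assign_domain(domain, did, dev, pasid);
	if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
		return ret;

	/* Nested domain: the stage-2 parent needs nesting tags too. */
	ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid);
	if (ret)
		__cache_tag_unassign_domain(domain, did, dev, pasid);

	return ret;
}
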
241 * Remove the cache tags associated with a device's PASID when the domain is detached from the device.
244 * The cache tags must be previously assigned to the domain by calling the assign interface.
247 void cache_tag_unassign_domain(struct dmar_domain *domain, in cache_tag_unassign_domain() argument
250 u16 did = domain_get_id_for_dev(domain, dev); in cache_tag_unassign_domain()
252 __cache_tag_unassign_domain(domain, did, dev, pasid); in cache_tag_unassign_domain()
253 if (domain->domain.type == IOMMU_DOMAIN_NESTED) in cache_tag_unassign_domain()
254 __cache_tag_unassign_parent_domain(domain->s2_domain, did, dev, pasid); in cache_tag_unassign_domain()
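
The unassign side mirrors the assign side with no error paths. A hypothetical usage pairing, assuming the attach/detach call sites and using the generic IOMMU_NO_PASID marker from include/linux/iommu.h:

/* On attach: */
ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
if (ret)
	return ret;

/* ... map/unmap paths now flush through the domain's tag list ... */

/* On detach: */
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
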
367 static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag, in cache_tag_flush_iotlb() argument
374 if (domain->use_first_level) { in cache_tag_flush_iotlb()
376 pages, ih, domain->qi_batch); in cache_tag_flush_iotlb()
381 * Fallback to domain selective flush if no PSI support or the size is too big. in cache_tag_flush_iotlb()
394 domain->qi_batch); in cache_tag_flush_iotlb()
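
cache_tag_flush_iotlb() picks the invalidation flavor per tag: a PASID-based (first-level) invalidation when the domain uses first-level translation, otherwise a page-selective second-level invalidation that degrades to a domain-selective flush when page-selective invalidation (PSI) is unsupported or the range is too large. A sketch; the qi_batch_add_*() helpers are the batching calls implied by the matched continuation lines, and the cap_* checks are assumptions modeled on the driver's capability macros:

static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
				  unsigned long addr, unsigned long pages,
				  unsigned long mask, int ih)
{
	struct intel_iommu *iommu = tag->iommu;
	u64 type = DMA_TLB_PSI_FLUSH;

	if (domain->use_first_level) {
		/* First-level: PASID-based IOTLB invalidation. */
		qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr,
				    pages, ih, domain->qi_batch);
		return;
	}

	/*
	 * Fall back to a domain-selective flush if there is no PSI support
	 * or the size is too big (pages == -1 requests a full flush).
	 */
	if (pages == -1 || !cap_pgsel_inv(iommu->cap) ||
	    mask > cap_max_amask_val(iommu->cap)) {
		addr = 0;
		mask = 0;
		ih = 0;
		type = DMA_TLB_DSI_FLUSH;
	}

	qi_batch_add_iotlb(iommu, tag->domain_id, addr | ih, mask, type,
			   domain->qi_batch);
}
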
399 static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_tag *tag, in cache_tag_flush_devtlb_psi() argument
411 addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
414 addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
419 info->ats_qdep, addr, mask, domain->qi_batch); in cache_tag_flush_devtlb_psi()
423 domain->qi_batch); in cache_tag_flush_devtlb_psi()
426 static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag) in cache_tag_flush_devtlb_all() argument
436 MAX_AGAW_PFN_WIDTH, domain->qi_batch); in cache_tag_flush_devtlb_all()
439 MAX_AGAW_PFN_WIDTH, domain->qi_batch); in cache_tag_flush_devtlb_all()
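
The device-TLB helpers issue ATS invalidations: cache_tag_flush_devtlb_psi() for a bounded range, with a PASID-scoped variant when the tag carries a PASID, and cache_tag_flush_devtlb_all() passing MAX_AGAW_PFN_WIDTH as the mask to cover the device's entire address space. A sketch of the range variant; the device_domain_info fields and the qi_batch_add_*dev_iotlb() helpers follow the argument patterns visible at lines 411-423 but should be read as assumptions:

static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain,
				       struct cache_tag *tag,
				       u64 addr, unsigned long mask)
{
	struct device_domain_info *info = dev_iommu_priv_get(tag->dev);
	struct intel_iommu *iommu = tag->iommu;
	u16 sid = PCI_DEVID(info->bus, info->devfn);

	if (tag->pasid == IOMMU_NO_PASID)
		/* Plain ATS invalidation of the requested range. */
		qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
				       addr, mask, domain->qi_batch);
	else
		/* PASID-scoped ATS invalidation. */
		qi_batch_add_pasid_dev_iotlb(iommu, sid, info->pfsid, tag->pasid,
					     info->ats_qdep, addr, mask,
					     domain->qi_batch);
}
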
444 * Invalidate a range of IOVA when the memory mappings in the target domain have been modified.
446 void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, in cache_tag_flush_range() argument
456 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_flush_range()
457 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_flush_range()
459 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
465 cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih); in cache_tag_flush_range()
479 cache_tag_flush_devtlb_psi(domain, tag, addr, mask); in cache_tag_flush_range()
485 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range()
486 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_flush_range()
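
cache_tag_flush_range() is where the tag list pays off: one walk dispatches the right invalidation for every (IOMMU, device, PASID) the domain touches, while descriptors accumulate in domain->qi_batch. The batch is flushed whenever the walk crosses from one IOMMU's tags to the next, which is why the assign path keeps same-IOMMU tags adjacent, and once more at the end. A sketch; calculate_psi_aligned_address() stands for the helper that rounds (start, end) to an address/mask pair that page-selective invalidation can express:

void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
			   unsigned long end, int ih)
{
	unsigned long pages, mask, addr, flags;
	struct intel_iommu *iommu = NULL;
	struct cache_tag *tag;

	addr = calculate_psi_aligned_address(start, end, &pages, &mask);

	spin_lock_irqsave(&domain->cache_lock, flags);
	list_for_each_entry(tag, &domain->cache_tags, node) {
		/* Crossing to another IOMMU: submit what we batched so far. */
		if (iommu && iommu != tag->iommu)
			qi_batch_flush_descs(iommu, domain->qi_batch);
		iommu = tag->iommu;

		switch (tag->type) {
		case CACHE_TAG_IOTLB:
		case CACHE_TAG_NESTING_IOTLB:
			cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih);
			break;
		case CACHE_TAG_DEVTLB:
		case CACHE_TAG_NESTING_DEVTLB:
			cache_tag_flush_devtlb_psi(domain, tag, addr, mask);
			break;
		}
	}
	qi_batch_flush_descs(iommu, domain->qi_batch);
	spin_unlock_irqrestore(&domain->cache_lock, flags);
}
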
491 * Invalidates all ranges of IOVA when the memory mappings in the target domain have been modified.
493 void cache_tag_flush_all(struct dmar_domain *domain) in cache_tag_flush_all() argument
499 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_flush_all()
500 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_flush_all()
502 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_all()
508 cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0); in cache_tag_flush_all()
512 cache_tag_flush_devtlb_all(domain, tag); in cache_tag_flush_all()
518 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_all()
519 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_flush_all()
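
cache_tag_flush_all() reuses the same lock/walk/batch skeleton; only the per-tag dispatch differs. Passing pages == -1 steers cache_tag_flush_iotlb() onto its domain-selective path, and cache_tag_flush_devtlb_all() covers the full device address width, so the loop body reduces to roughly:

/* Same batching loop as cache_tag_flush_range(); per-tag dispatch only: */
switch (tag->type) {
case CACHE_TAG_IOTLB:
case CACHE_TAG_NESTING_IOTLB:
	/* addr 0, pages -1: full, domain-selective IOTLB flush. */
	cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
	break;
case CACHE_TAG_DEVTLB:
case CACHE_TAG_NESTING_DEVTLB:
	cache_tag_flush_devtlb_all(domain, tag);
	break;
}
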
524 * Invalidate a range of IOVA when new mappings are created in the target domain.
533 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, in cache_tag_flush_range_np() argument
543 spin_lock_irqsave(&domain->cache_lock, flags); in cache_tag_flush_range_np()
544 list_for_each_entry(tag, &domain->cache_tags, node) { in cache_tag_flush_range_np()
546 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
549 if (!cap_caching_mode(iommu->cap) || domain->use_first_level) { in cache_tag_flush_range_np()
556 cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0); in cache_tag_flush_range_np()
560 qi_batch_flush_descs(iommu, domain->qi_batch); in cache_tag_flush_range_np()
561 spin_unlock_irqrestore(&domain->cache_lock, flags); in cache_tag_flush_range_np()
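
cache_tag_flush_range_np() covers the non-present-to-present case, i.e. newly created mappings. Hardware only needs an IOTLB invalidation here when it may cache non-present entries (the Caching Mode capability) and the domain uses second-level translation; otherwise flushing the write buffer is sufficient. Inside the same batching loop, the per-tag body plausibly reads:

/*
 * Sketch of the per-tag body; iommu_flush_write_buffer() is the
 * driver's existing write-buffer flush helper.
 */
if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
	/* Hardware never caches non-present entries: flush is enough. */
	iommu_flush_write_buffer(iommu);
	continue;
}
cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0);
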