Lines Matching full:ua

33 	u64 ua;			/* userspace address */  in mm_iommu_table_group_mem_t member
39 * We need to convert ua to hpa in real mode. Make it
56 static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, in mm_iommu_do_alloc() argument
88 * we use @ua and @entries natural alignment to allow IOMMU pages in mm_iommu_do_alloc()
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); in mm_iommu_do_alloc()
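
These matches appear to come from the powerpc memory pre-registration code used by KVM/VFIO (the mm_iommu_* helpers). The __ffs() line above picks the largest IOMMU page size that both the start address and the total length of the region are naturally aligned to: the lowest set bit of ua | (entries << PAGE_SHIFT) is the alignment the two values share (the kernel then clamps it further against the pages actually pinned). A minimal userspace sketch of just that arithmetic, assuming 64K base pages and using __builtin_ctzl() as a stand-in for the kernel's __ffs():

#include <stdio.h>

#define PAGE_SHIFT 16UL		/* assumption: 64K base pages, typical for ppc64 */

/* Userspace stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned long lowest_set_bit(unsigned long x)
{
	return (unsigned long)__builtin_ctzl(x);
}

/*
 * Largest page shift usable for a region starting at ua and spanning
 * "entries" base pages: both the start address and the byte length must
 * be multiples of the IOMMU page size, so take the alignment the two
 * have in common, i.e. the lowest set bit of their OR.
 */
static unsigned long natural_pageshift(unsigned long ua, unsigned long entries)
{
	return lowest_set_bit(ua | (entries << PAGE_SHIFT));
}

int main(void)
{
	/* 16M-aligned start, 256 x 64K = 16M long: 16M (2^24) IOMMU pages fit. */
	printf("%lu\n", natural_pageshift(0x10000000UL, 256));	/* prints 24 */
	/* Same length, but the start is only 64K-aligned: limited to 64K pages. */
	printf("%lu\n", natural_pageshift(0x10010000UL, 256));	/* prints 16 */
	return 0;
}
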
106 ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n, in mm_iommu_do_alloc()
127 mem->ua = ua; in mm_iommu_do_alloc()
135 if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) && in mm_iommu_do_alloc()
136 (ua < (mem2->ua + in mm_iommu_do_alloc()
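
The two-line condition above (truncated by the per-line matcher) rejects a registration that overlaps an already-registered region. It is the usual half-open interval test: [a, a + len_a) and [b, b + len_b) intersect exactly when each start lies below the other end. A sketch of the test in isolation; the struct region descriptor here is illustrative, not the kernel's type:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 16UL		/* assumption: 64K base pages */

/* Illustrative stand-in for a registered-region descriptor. */
struct region {
	unsigned long ua;	/* userspace address of the region start */
	unsigned long entries;	/* length in base pages */
};

/*
 * Half-open interval test: the new region [ua, ua + len) and the existing
 * one overlap exactly when each start lies below the other end.
 */
static bool regions_overlap(unsigned long ua, unsigned long entries,
			    const struct region *mem2)
{
	return (mem2->ua < ua + (entries << PAGE_SHIFT)) &&
	       (ua < mem2->ua + (mem2->entries << PAGE_SHIFT));
}

int main(void)
{
	struct region existing = { .ua = 0x100000, .entries = 16 };	/* 1M..2M */

	/* A new region at 2M..3M only touches the boundary: no overlap. */
	printf("%d\n", regions_overlap(0x200000, 16, &existing));	/* prints 0 */
	/* A new region at 1.5M..2.5M intersects the existing one. */
	printf("%d\n", regions_overlap(0x180000, 16, &existing));	/* prints 1 */
	return 0;
}
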
186 long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries, in mm_iommu_new() argument
189 return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA, in mm_iommu_new()
194 long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua, in mm_iommu_newdev() argument
198 return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem); in mm_iommu_newdev()
289 unsigned long ua, unsigned long size) in mm_iommu_lookup() argument
295 if ((mem->ua <= ua) && in mm_iommu_lookup()
296 (ua + size <= mem->ua + in mm_iommu_lookup()
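
mm_iommu_lookup() scans the per-mm list for a registered region that fully contains the requested window [ua, ua + size); the truncated condition above is a plain containment check. Sketched on its own, with the same illustrative descriptor as above:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 16UL		/* assumption: 64K base pages */

struct region {			/* illustrative descriptor, as above */
	unsigned long ua;
	unsigned long entries;
};

/*
 * True when the window [ua, ua + size) lies entirely inside the
 * registered region [mem->ua, mem->ua + entries * page size).
 */
static bool region_contains(const struct region *mem,
			    unsigned long ua, unsigned long size)
{
	return mem->ua <= ua &&
	       ua + size <= mem->ua + (mem->entries << PAGE_SHIFT);
}

int main(void)
{
	struct region mem = { .ua = 0x100000, .entries = 16 };	/* 1M..2M */

	printf("%d\n", region_contains(&mem, 0x180000, 0x10000));	/* 1: inside */
	printf("%d\n", region_contains(&mem, 0x1f0000, 0x20000));	/* 0: runs past the end */
	return 0;
}

mm_iommu_get(), in the next group of matches, is the exact-match variant: it compares both ua and entries rather than testing containment.
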
309 unsigned long ua, unsigned long entries) in mm_iommu_get() argument
317 if ((mem->ua == ua) && (mem->entries == entries)) { in mm_iommu_get()
331 unsigned long ua, unsigned int pageshift, unsigned long *hpa) in mm_iommu_ua_to_hpa() argument
333 const long entry = (ua - mem->ua) >> PAGE_SHIFT; in mm_iommu_ua_to_hpa()
343 *hpa = mem->dev_hpa + (ua - mem->ua); in mm_iommu_ua_to_hpa()
348 *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK); in mm_iommu_ua_to_hpa()
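
The final matches show the actual translation. The entry index is the page offset of ua from the start of the registered region; for a device-memory region (dev_hpa set) the host physical address is simply dev_hpa plus the byte offset, while for pinned system memory the stored per-entry hpa supplies the page frame and the low bits of ua supply the offset within the page. A compact userspace model of that logic, assuming 64K pages and using PAGE_MASK in place of MM_IOMMU_TABLE_GROUP_PAGE_MASK and a -1 sentinel in place of MM_IOMMU_TABLE_INVALID_HPA; the real function also takes a pageshift argument and refuses requests larger than the region supports, which this sketch omits:

#include <stdio.h>

#define PAGE_SHIFT	16UL			/* assumption: 64K base pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define INVALID_HPA	((unsigned long)-1)	/* stand-in for MM_IOMMU_TABLE_INVALID_HPA */

/* Illustrative model of a registered region. */
struct region {
	unsigned long ua;	/* userspace start address */
	unsigned long entries;	/* length in base pages */
	unsigned long dev_hpa;	/* INVALID_HPA unless the region is device memory */
	unsigned long *hpas;	/* per-entry host physical addresses (page aligned) */
};

/* Returns 0 on success, -1 if ua falls outside the region. */
static int ua_to_hpa(const struct region *mem, unsigned long ua, unsigned long *hpa)
{
	unsigned long entry = (ua - mem->ua) >> PAGE_SHIFT;

	if (ua < mem->ua || entry >= mem->entries)
		return -1;

	if (mem->dev_hpa != INVALID_HPA) {
		/* Device memory is contiguous: plain offset from dev_hpa. */
		*hpa = mem->dev_hpa + (ua - mem->ua);
		return 0;
	}

	/* Pinned system memory: page frame from the table, in-page offset from ua. */
	*hpa = (mem->hpas[entry] & PAGE_MASK) | (ua & ~PAGE_MASK);
	return 0;
}

int main(void)
{
	unsigned long hpas[2] = { 0x200000000UL, 0x340000000UL };
	struct region mem = { .ua = 0x10000000, .entries = 2,
			      .dev_hpa = INVALID_HPA, .hpas = hpas };
	unsigned long hpa;

	if (!ua_to_hpa(&mem, 0x10010123, &hpa))		/* second page, offset 0x123 */
		printf("hpa = 0x%lx\n", hpa);		/* prints hpa = 0x340000123 */
	return 0;
}
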