Lines Matching full:range
29 iommu_from_common(pts->range->common)->iommu_device, in flush_writes_range()
38 iommu_from_common(pts->range->common)->iommu_device, in flush_writes_item()
70 static int make_range_ul(struct pt_common *common, struct pt_range *range, in make_range_ul() argument
81 *range = pt_make_range(common, iova, last); in make_range_ul()
82 if (sizeof(iova) > sizeof(range->va)) { in make_range_ul()
83 if (unlikely(range->va != iova || range->last_va != last)) in make_range_ul()
90 struct pt_range *range, u64 iova, in make_range_u64() argument
95 return make_range_ul(common, range, iova, len); in make_range_u64()
102 #define make_range_no_check(common, range, iova, len) \ argument
107 ret = make_range_u64(common, range, iova, len); \
109 ret = make_range_ul(common, range, iova, len); \
113 #define make_range(common, range, iova, len) \ argument
115 int ret = make_range_no_check(common, range, iova, len); \
117 ret = pt_check_range(range); \
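
The helpers above split range construction from validation: make_range_ul() and make_range_u64() build the range (the u64 variant and the sizeof() guard cover IOVAs wider than unsigned long on 32-bit builds), while make_range() layers pt_check_range() on top and make_range_no_check() skips it for callers that validate later. A minimal sketch of that build-then-validate pattern, using a hypothetical toy_pt_range and an explicit max_va standing in for the table limit pt_check_range() consults:

	#include <errno.h>

	/* Hypothetical minimal pt_range; the real structure carries more state. */
	struct toy_pt_range {
		unsigned long va;
		unsigned long last_va;
	};

	/*
	 * Construct [iova, iova + len - 1], rejecting zero length and
	 * wrap-around, then bounds-check against max_va.
	 */
	static int toy_make_range(struct toy_pt_range *range, unsigned long iova,
				  unsigned long len, unsigned long max_va)
	{
		if (len == 0 || iova + len - 1 < iova)
			return -ERANGE;
		range->va = iova;
		range->last_va = iova + len - 1;
		return range->last_va > max_va ? -ERANGE : 0;
	}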
124 struct pt_iommu *iommu_table = iommu_from_common(pts->range->common); in compute_best_pgsize()
135 pts->range->va, pts->range->last_va, oa); in compute_best_pgsize()
138 static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg, in __do_iova_to_phys() argument
143 struct pt_state pts = pt_init(range, level, table); in __do_iova_to_phys()
167 * Context: The caller must hold a read range lock that includes @iova.
176 struct pt_range range; in DOMAIN_NS() local
180 ret = make_range(common_from_iommu(iommu_table), &range, iova, 1); in DOMAIN_NS()
184 ret = pt_walk_range(&range, __iova_to_phys, &res); in DOMAIN_NS()
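
The lookup path above reuses the range machinery for a single address: make_range(..., iova, 1) builds a one-byte range and pt_walk_range() descends it with __iova_to_phys as the visitor. A toy analogue of that degenerate walk, assuming a hypothetical two-level table of plain arrays (VA bits above the two levels are ignored for simplicity):

	#include <stdint.h>

	#define TOY_LVL1_SHIFT	22	/* each level-1 slot covers 4 MiB */
	#define TOY_LVL0_SHIFT	12	/* 4 KiB leaf pages */
	#define TOY_ENTRIES	1024

	struct toy_table {
		uint64_t *leaves[TOY_ENTRIES];	/* level 1: pointers to leaf arrays */
	};

	/*
	 * One-address "walk": index each level with the relevant VA bits,
	 * as __do_iova_to_phys() does through pt_init()/pt_state.  Leaf
	 * entries are page-aligned physical addresses; 0 means no
	 * translation is installed.
	 */
	static uint64_t toy_iova_to_phys(struct toy_table *t, uint64_t iova)
	{
		uint64_t *leaf = t->leaves[(iova >> TOY_LVL1_SHIFT) % TOY_ENTRIES];

		if (!leaf)
			return 0;
		return leaf[(iova >> TOY_LVL0_SHIFT) % TOY_ENTRIES] +
		       (iova & ((1ull << TOY_LVL0_SHIFT) - 1));
	}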
217 iova_bitmap_set(dirty->dirty->bitmap, pts->range->va, in record_dirty()
227 pts->range->va, dirty_len); in record_dirty()
231 static inline int __read_and_clear_dirty(struct pt_range *range, void *arg, in __read_and_clear_dirty() argument
235 struct pt_state pts = pt_init(range, level, table); in __read_and_clear_dirty()
261 * Iterate over all the entries in the mapped range and record their write dirty
266 * Context: The caller must hold a read range lock that includes @iova.
281 struct pt_range range; in DOMAIN_NS() local
288 ret = make_range(common_from_iommu(iommu_table), &range, iova, size); in DOMAIN_NS()
292 ret = pt_walk_range(&range, __read_and_clear_dirty, &dirty_args); in DOMAIN_NS()
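
The dirty-tracking walk above visits each mapped entry, records written spans via iova_bitmap_set() (see record_dirty() earlier in the listing), and clears the hardware dirty bit unless the caller only wants to read it. A toy analogue over an array of PTE-like words, with a hypothetical dirty bit and byte-granular bitmap:

	#include <stdbool.h>
	#include <stdint.h>

	#define TOY_DIRTY	(1ull << 1)	/* hypothetical hardware dirty bit */

	/*
	 * Scan the PTEs, record written pages in bitmap (the
	 * iova_bitmap_set() step), and clear the dirty bit unless
	 * no_clear is set, mirroring the read-only mode of the real API.
	 */
	static void toy_read_and_clear_dirty(uint64_t *ptes, unsigned int npages,
					     uint8_t *bitmap, bool no_clear)
	{
		for (unsigned int i = 0; i != npages; i++) {
			if (!(ptes[i] & TOY_DIRTY))
				continue;
			bitmap[i / 8] |= 1u << (i % 8);
			if (!no_clear)
				ptes[i] &= ~TOY_DIRTY;
		}
	}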
298 static inline int __set_dirty(struct pt_range *range, void *arg, in __set_dirty() argument
301 struct pt_state pts = pt_init(range, level, table); in __set_dirty()
319 struct pt_range range; in NS() local
322 ret = make_range(common_from_iommu(iommu_table), &range, iova, 1); in NS()
330 return pt_walk_range(&range, __set_dirty, NULL); in NS()
335 /* Fail if any OAs are within the range */
339 static int __collect_tables(struct pt_range *range, void *arg, in __collect_tables() argument
342 struct pt_state pts = pt_init(range, level, table); in __collect_tables()
406 pt_init(parent_pts->range, parent_pts->level - 1, NULL); in table_alloc()
408 return _table_alloc(parent_pts->range->common, in table_alloc()
432 iommu_from_common(pts->range->common)->iommu_device); in pt_iommu_new_table()
451 table_mem, iommu_from_common(pts->range->common) in pt_iommu_new_table()
478 iommu_from_common(start_pts->range->common); in clear_contig()
479 struct pt_range range = *start_pts->range; in clear_contig() local
481 pt_init(&range, start_pts->level, start_pts->table); in clear_contig()
506 iotlb_gather, iommu_table, range.va, in clear_contig()
516 static int __map_range_leaf(struct pt_range *range, void *arg, in __map_range_leaf() argument
519 struct pt_state pts = pt_init(range, level, table); in __map_range_leaf()
565 static int __map_range(struct pt_range *range, void *arg, unsigned int level, in __map_range() argument
568 struct pt_state pts = pt_init(range, level, table); in __map_range()
636 static __always_inline int __do_map_single_page(struct pt_range *range, in __do_map_single_page() argument
641 struct pt_state pts = pt_init(range, level, table); in __do_map_single_page()
663 * encompass range.
665 static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range, in increase_top() argument
683 top_range.va = range->va; in increase_top()
684 top_range.last_va = range->last_va; in increase_top()
770 static int check_map_range(struct pt_iommu *iommu_table, struct pt_range *range, in check_map_range() argument
777 ret = pt_check_range(range); in check_map_range()
781 if (!ret && map->leaf_level <= range->top_level) in check_map_range()
784 ret = increase_top(iommu_table, range, map); in check_map_range()
789 *range = pt_make_range(common, range->va, range->last_va); in check_map_range()
791 PT_WARN_ON(pt_check_range(range)); in check_map_range()
795 static int do_map(struct pt_range *range, struct pt_common *common, in do_map() argument
805 ret = pt_walk_range(range, __map_single_page, map); in do_map()
811 if (map->leaf_level == range->top_level) in do_map()
812 return pt_walk_range(range, __map_range_leaf, map); in do_map()
813 return pt_walk_range(range, __map_range, map); in do_map()
817 * map_pages() - Install translation for an IOVA range
822 * @pgcount: Length of the range in pgsize units starting from @iova
827 * The range starting at IOVA will have paddr installed into it. The caller
828 * must specify a valid pgsize and pgcount to segment the range into compatible
831 * On error the caller will probably want to invoke unmap on the range from iova
835 * Context: The caller must hold a write range lock that includes the whole
836 * range.
857 struct pt_range range; in DOMAIN_NS() local
877 ret = make_range_no_check(common, &range, iova, len); in DOMAIN_NS()
892 pgsize_bitmap, range.va, range.last_va, paddr); in DOMAIN_NS()
899 ret = check_map_range(iommu_table, &range, &map); in DOMAIN_NS()
903 PT_WARN_ON(map.leaf_level > range.top_level); in DOMAIN_NS()
905 ret = do_map(&range, common, single_page, &map); in DOMAIN_NS()
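
The map_pages() kernel-doc above requires the caller to segment the range into (pgsize, pgcount) pairs the table supports, and the pgsize_bitmap check in the listing enforces the same rule inside the walk. A sketch of how such a caller might pick the largest compatible size, assuming a one-bit-per-power-of-two-size bitmap as in the real API:

	#include <stdint.h>

	/*
	 * Pick the largest supported page size that is no larger than len
	 * and divides both iova and paddr; returns 0 if none fits.
	 */
	static uint64_t toy_pick_pgsize(uint64_t pgsize_bitmap, uint64_t iova,
					uint64_t paddr, uint64_t len)
	{
		uint64_t sizes = pgsize_bitmap;

		while (sizes) {
			/* largest remaining candidate size */
			uint64_t pgsize = 1ull << (63 - __builtin_clzll(sizes));

			if (pgsize <= len && !((iova | paddr) & (pgsize - 1)))
				return pgsize;
			sizes &= ~pgsize;
		}
		return 0;
	}

As the kernel-doc notes, map_pages() does not unwind across calls on failure; the caller is expected to unmap from iova up to the point where mapping stopped.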
926 static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, in __unmap_range() argument
930 struct pt_state pts = pt_init(range, level, table); in __unmap_range()
951 if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts))) in __unmap_range()
978 * If the unmapping range fully covers the table then we in __unmap_range()
1013 * unmap_pages() - Make a range of IOVA empty/not present
1017 * @pgcount: Length of the range in pgsize units starting from @iova
1022 * ranges that match those passed to map_pages(). The IOVA range can aggregate
1023 * contiguous map_pages() calls so long as no individual range is split.
1025 * Context: The caller must hold a write range lock that includes
1026 * the whole range.
1040 struct pt_range range; in DOMAIN_NS() local
1043 ret = make_range(common_from_iommu(iommu_table), &range, iova, len); in DOMAIN_NS()
1047 pt_walk_range(&range, __unmap_range, &unmap); in DOMAIN_NS()
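
The unmap_pages() contract above allows aggregating whole mappings but forbids splitting one: a huge page installed by a single map_pages() call can only be unmapped in full. A hedged caller-side sketch using the core iommu_unmap() wrapper, assuming the domain mapped a 2 MiB page at this IOVA:

	#include <linux/bug.h>
	#include <linux/iommu.h>
	#include <linux/sizes.h>

	/*
	 * Unmap exactly what was mapped.  iommu_unmap() returns the number
	 * of bytes actually unmapped; a short return here would indicate a
	 * caller bug such as trying to split a huge page in the middle.
	 */
	static void toy_unmap_whole(struct iommu_domain *domain, unsigned long iova)
	{
		size_t unmapped = iommu_unmap(domain, iova, SZ_2M);

		WARN_ON(unmapped != SZ_2M);
	}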
1060 struct pt_range range = pt_top_range(common); in NS() local
1061 struct pt_state pts = pt_init_top(&range); in NS()
1072 for (pts.level = 0; pts.level <= range.top_level; pts.level++) in NS()
1083 struct pt_range range = pt_all_range(common); in NS() local
1088 iommu_pages_list_add(&collect.free_list, range.top_table); in NS()
1089 pt_walk_range(&range, __collect_tables, &collect); in NS()
1137 struct pt_state pts = { .range = &top_range, in pt_init_common()
1158 struct pt_range range; in pt_iommu_init_domain() local
1166 range = _pt_top_range(common, in pt_iommu_init_domain()
1169 range = pt_top_range(common); in pt_iommu_init_domain()
1172 domain->geometry.aperture_start = (unsigned long)range.va; in pt_iommu_init_domain()
1173 if ((pt_vaddr_t)domain->geometry.aperture_start != range.va) in pt_iommu_init_domain()
1181 * than the top range. aperture_end should be called aperture_last. in pt_iommu_init_domain()
1183 domain->geometry.aperture_end = (unsigned long)range.last_va; in pt_iommu_init_domain()
1184 if ((pt_vaddr_t)domain->geometry.aperture_end != range.last_va) { in pt_iommu_init_domain()
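
The aperture setup at the end casts a possibly wider pt_vaddr_t into the unsigned long geometry fields and casts back to detect truncation on 32-bit builds (the source comment also notes aperture_end is really an inclusive last address). The idiom in isolation, with a hypothetical toy_vaddr_t:

	#include <errno.h>
	#include <stdint.h>

	typedef uint64_t toy_vaddr_t;	/* may be wider than unsigned long */

	/*
	 * Cast-and-compare truncation check: store the VA in the narrower
	 * field, widen it again, and compare.  A mismatch means the table
	 * spans more than this architecture can address.
	 */
	static int toy_set_aperture(unsigned long *field, toy_vaddr_t va)
	{
		*field = (unsigned long)va;
		if ((toy_vaddr_t)*field != va)
			return -ERANGE;
		return 0;
	}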