Lines matching full:area (drivers/iommu/iommufd/io_pagetable.c)
25 struct iopt_area *area; member
40 iter->area = iopt_area_iter_first(iopt, iova, iova); in iopt_area_contig_init()
41 if (!iter->area) in iopt_area_contig_init()
43 if (!iter->area->pages) { in iopt_area_contig_init()
44 iter->area = NULL; in iopt_area_contig_init()
47 return iter->area; in iopt_area_contig_init()
54 if (!iter->area) in iopt_area_contig_next()
56 last_iova = iopt_area_last_iova(iter->area); in iopt_area_contig_next()
61 iter->area = iopt_area_iter_next(iter->area, iter->cur_iova, in iopt_area_contig_next()
63 if (!iter->area) in iopt_area_contig_next()
65 if (iter->cur_iova != iopt_area_iova(iter->area) || in iopt_area_contig_next()
66 !iter->area->pages) { in iopt_area_contig_next()
67 iter->area = NULL; in iopt_area_contig_next()
70 return iter->area; in iopt_area_contig_next()
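Together these two fragments implement a contiguous-area iterator: iopt_area_contig_init() locates the first area covering the starting IOVA, and iopt_area_contig_next() keeps advancing only while the next area begins exactly at cur_iova and has pages committed, setting iter->area to NULL the moment a hole appears. A minimal usage sketch, assuming the iopt_for_each_contig_area() macro seen in later matches and the iopt_area_contig_done() helper that pairs with it in the iommufd headers (example_contig_length() itself is hypothetical):

	static unsigned long example_contig_length(struct io_pagetable *iopt,
						   unsigned long iova,
						   unsigned long last_iova)
	{
		struct iopt_area_contig_iter iter;
		unsigned long total = 0;
		struct iopt_area *area;

		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) {
			unsigned long last = min(last_iova,
						 iopt_area_last_iova(area));

			total += last - iter.cur_iova + 1;
		}
		/* iter.area went NULL early if a hole broke the range */
		if (!iopt_area_contig_done(&iter))
			return 0;
		return total;
	}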
205 * The area takes a slice of the pages from start_byte to start_byte + length
207 static int iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area, in iopt_insert_area() argument
217 area->iommu_prot = iommu_prot; in iopt_insert_area()
218 area->page_offset = start_byte % PAGE_SIZE; in iopt_insert_area()
219 if (area->page_offset & (iopt->iova_alignment - 1)) in iopt_insert_area()
222 area->node.start = iova; in iopt_insert_area()
223 if (check_add_overflow(iova, length - 1, &area->node.last)) in iopt_insert_area()
226 area->pages_node.start = start_byte / PAGE_SIZE; in iopt_insert_area()
227 if (check_add_overflow(start_byte, length - 1, &area->pages_node.last)) in iopt_insert_area()
229 area->pages_node.last = area->pages_node.last / PAGE_SIZE; in iopt_insert_area()
230 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
234 * The area is inserted with a NULL pages indicating it is not fully initialized yet. in iopt_insert_area()
237 area->iopt = iopt; in iopt_insert_area()
238 interval_tree_insert(&area->node, &iopt->area_itree); in iopt_insert_area()
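iopt_insert_area() also derives the inclusive page-index range that links the area into the pages' interval tree. A worked example of that math with hypothetical values:

	/*
	 * Hypothetical values: PAGE_SIZE = 4096, start_byte = 5000,
	 * length = 10000.
	 *
	 *   page_offset      = 5000 % 4096               = 904
	 *   pages_node.start = 5000 / 4096               = 1
	 *   pages_node.last  = (5000 + 10000 - 1) / 4096 = 14999 / 4096 = 3
	 *
	 * The area references page indexes 1..3 of its iopt_pages; the
	 * check_add_overflow() calls reject byte and IOVA ranges that
	 * would wrap past ULONG_MAX, and the WARN_ON rejects a slice
	 * extending beyond pages->npages.
	 */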
244 struct iopt_area *area; in iopt_area_alloc() local
246 area = kzalloc(sizeof(*area), GFP_KERNEL_ACCOUNT); in iopt_area_alloc()
247 if (!area) in iopt_area_alloc()
249 RB_CLEAR_NODE(&area->node.rb); in iopt_area_alloc()
250 RB_CLEAR_NODE(&area->pages_node.rb); in iopt_area_alloc()
251 return area; in iopt_area_alloc()
265 elm->area = iopt_area_alloc(); in iopt_alloc_area_pages()
266 if (!elm->area) in iopt_alloc_area_pages()
308 rc = iopt_insert_area(iopt, elm->area, elm->pages, iova, in iopt_alloc_area_pages()
320 static void iopt_abort_area(struct iopt_area *area) in iopt_abort_area() argument
323 WARN_ON(area->pages); in iopt_abort_area()
324 if (area->iopt) { in iopt_abort_area()
325 down_write(&area->iopt->iova_rwsem); in iopt_abort_area()
326 interval_tree_remove(&area->node, &area->iopt->area_itree); in iopt_abort_area()
327 up_write(&area->iopt->iova_rwsem); in iopt_abort_area()
329 kfree(area); in iopt_abort_area()
338 if (elm->area) in iopt_free_pages_list()
339 iopt_abort_area(elm->area); in iopt_free_pages_list()
354 rc = iopt_area_fill_domains(elm->area, elm->pages); in iopt_fill_domains_pages()
364 iopt_area_unfill_domains(undo_elm->area, undo_elm->pages); in iopt_fill_domains_pages()
389 * area->pages must be set inside the domains_rwsem to ensure in iopt_map_pages()
393 elm->area->pages = elm->pages; in iopt_map_pages()
395 elm->area = NULL; in iopt_map_pages()
422 if (elm.area) in iopt_map_common()
423 iopt_abort_area(elm.area); in iopt_map_common()
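Fragments 244 through 423 trace a two-phase commit: the area is allocated and inserted while area->pages is still NULL, the domains are filled, and only then is the pages pointer published and elm->area cleared so the error path will not abort a committed area. A condensed, hypothetical sketch of that flow (locking elided; the iopt_pages_list fields and the iopt_insert_area() parameter order are assumed from the fragments above):

	static int example_map_one(struct io_pagetable *iopt,
				   struct iopt_pages_list *elm,
				   unsigned long iova, unsigned long length,
				   int iommu_prot)
	{
		int rc;

		elm->area = iopt_area_alloc();	/* area->pages stays NULL */
		if (!elm->area)
			return -ENOMEM;
		rc = iopt_insert_area(iopt, elm->area, elm->pages, iova,
				      elm->start_byte, length, iommu_prot);
		if (rc)
			goto out_abort;
		rc = iopt_area_fill_domains(elm->area, elm->pages);
		if (rc)
			goto out_abort;
		elm->area->pages = elm->pages;	/* commit: area is now live */
		elm->area = NULL;	/* error paths must not abort it */
		return 0;

	out_abort:
		iopt_abort_area(elm->area);	/* WARN_ON(area->pages) holds */
		elm->area = NULL;
		return rc;
	}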
503 struct iopt_area *area; in __iommu_read_and_clear_dirty() local
513 iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) { in __iommu_read_and_clear_dirty()
514 unsigned long last = min(last_iova, iopt_area_last_iova(area)); in __iommu_read_and_clear_dirty()
615 struct iopt_area *area; in iopt_clear_dirty_data() local
622 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_clear_dirty_data()
623 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_clear_dirty_data()
624 if (!area->pages) in iopt_clear_dirty_data()
627 ret = ops->read_and_clear_dirty(domain, iopt_area_iova(area), in iopt_clear_dirty_data()
628 iopt_area_length(area), 0, in iopt_clear_dirty_data()
668 struct iopt_area *area; in iopt_get_pages() local
677 iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova) { in iopt_get_pages()
679 unsigned long last = min(last_iova, iopt_area_last_iova(area)); in iopt_get_pages()
686 elm->start_byte = iopt_area_start_byte(area, iter.cur_iova); in iopt_get_pages()
687 elm->pages = area->pages; in iopt_get_pages()
707 struct iopt_area *area; in iopt_unmap_iova_range() local
714 * The domains_rwsem must be held in read mode any time any area->pages in iopt_unmap_iova_range()
716 * concurrently with cleaning up the area. in iopt_unmap_iova_range()
721 while ((area = iopt_area_iter_first(iopt, start, last))) { in iopt_unmap_iova_range()
722 unsigned long area_last = iopt_area_last_iova(area); in iopt_unmap_iova_range()
723 unsigned long area_first = iopt_area_iova(area); in iopt_unmap_iova_range()
726 /* Userspace should not race map/unmap operations on the same area */ in iopt_unmap_iova_range()
727 if (!area->pages) { in iopt_unmap_iova_range()
732 /* The area is locked by an object that has not been destroyed */ in iopt_unmap_iova_range()
733 if (area->num_locks) { in iopt_unmap_iova_range()
751 if (area->num_accesses) { in iopt_unmap_iova_range()
752 size_t length = iopt_area_length(area); in iopt_unmap_iova_range()
755 area->prevent_access = true; in iopt_unmap_iova_range()
769 pages = area->pages; in iopt_unmap_iova_range()
770 area->pages = NULL; in iopt_unmap_iova_range()
773 iopt_area_unfill_domains(area, pages); in iopt_unmap_iova_range()
774 iopt_abort_area(area); in iopt_unmap_iova_range()
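Fragments 751 through 774 show the hard case in the unmap loop: an area still pinned by accesses gets prevent_access set, the locks are dropped so the access holders can be told to release their pins, and the walk restarts because the area may be gone once the locks are retaken; only an area free of users has its pages pointer detached under the locks and its domains unfilled outside them. A hypothetical condensation of the access-blocking step (the notification call itself is elided):

	static void example_block_accesses(struct io_pagetable *iopt,
					   struct iopt_area *area)
	{
		area->prevent_access = true;	/* new accesses fail fast */
		up_write(&iopt->iova_rwsem);
		up_read(&iopt->domains_rwsem);

		/* notify access holders so they drop their pins (elided) */

		down_read(&iopt->domains_rwsem);
		down_write(&iopt->iova_rwsem);
		/* caller must re-find the area; it may be gone by now */
	}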
931 * This is used when removing a domain from the iopt. Every area in the iopt
938 struct iopt_area *area; in iopt_unfill_domain() local
952 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_unfill_domain()
953 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_unfill_domain()
954 struct iopt_pages *pages = area->pages; in iopt_unfill_domain()
961 WARN_ON(!area->storage_domain); in iopt_unfill_domain()
962 if (area->storage_domain == domain) in iopt_unfill_domain()
963 area->storage_domain = storage_domain; in iopt_unfill_domain()
966 iopt_area_unmap_domain(area, domain); in iopt_unfill_domain()
971 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_unfill_domain()
972 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_unfill_domain()
973 struct iopt_pages *pages = area->pages; in iopt_unfill_domain()
979 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_unfill_domain()
980 WARN_ON(area->storage_domain != domain); in iopt_unfill_domain()
981 area->storage_domain = NULL; in iopt_unfill_domain()
982 iopt_area_unfill_domain(area, pages, domain); in iopt_unfill_domain()
992 * Fill the domain with PFNs from every area in the iopt. On failure the domain is left unchanged.
999 struct iopt_area *area; in iopt_fill_domain() local
1005 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_fill_domain()
1006 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_fill_domain()
1007 struct iopt_pages *pages = area->pages; in iopt_fill_domain()
1013 rc = iopt_area_fill_domain(area, domain); in iopt_fill_domain()
1018 if (!area->storage_domain) { in iopt_fill_domain()
1020 area->storage_domain = domain; in iopt_fill_domain()
1021 interval_tree_insert(&area->pages_node, in iopt_fill_domain()
1029 end_area = area; in iopt_fill_domain()
1030 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_fill_domain()
1031 area = iopt_area_iter_next(area, 0, ULONG_MAX)) { in iopt_fill_domain()
1032 struct iopt_pages *pages = area->pages; in iopt_fill_domain()
1034 if (area == end_area) in iopt_fill_domain()
1040 interval_tree_remove(&area->pages_node, in iopt_fill_domain()
1042 area->storage_domain = NULL; in iopt_fill_domain()
1044 iopt_area_unfill_domain(area, pages, domain); in iopt_fill_domain()
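The unwind in fragments 1029 through 1044 is a partial-failure rollback: end_area records the first area the forward loop did not fill, and a second walk undoes only the areas before it. The matched lines omit the non-matching lines in between, so the following is a hypothetical reconstruction of the loop's shape (the exact guard used in the source may differ):

	end_area = area;	/* first area that was NOT filled */
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		struct iopt_pages *pages = area->pages;

		if (area == end_area)
			break;		/* nothing past this was touched */
		if (!pages)
			continue;
		if (area->storage_domain == domain) {
			interval_tree_remove(&area->pages_node,
					     &pages->domains_itree);
			area->storage_domain = NULL;
		}
		iopt_area_unfill_domain(area, pages, domain);
	}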
1050 /* All existing areas conform to an increased page size */
1055 struct iopt_area *area; in iopt_check_iova_alignment() local
1060 for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; in iopt_check_iova_alignment()
1061 area = iopt_area_iter_next(area, 0, ULONG_MAX)) in iopt_check_iova_alignment()
1062 if ((iopt_area_iova(area) & align_mask) || in iopt_check_iova_alignment()
1063 (iopt_area_length(area) & align_mask) || in iopt_check_iova_alignment()
1064 (area->page_offset & align_mask)) in iopt_check_iova_alignment()
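This check gates any increase of iopt->iova_alignment: every existing area's IOVA, length, and in-page offset must already be multiples of the new page size, otherwise the area could no longer be mapped with the larger pages. A worked example with hypothetical numbers:

	/*
	 * Raising the alignment to 0x10000 (64K) gives
	 * align_mask = 0xffff. An area with
	 *
	 *   iopt_area_iova(area)   = 0x20000  (0x20000 & 0xffff == 0, ok)
	 *   iopt_area_length(area) = 0x30000  (0x30000 & 0xffff == 0, ok)
	 *   area->page_offset      = 0x1000   (0x1000  & 0xffff != 0, bad)
	 *
	 * fails the check: its backing bytes cannot be mapped with 64K
	 * pages.
	 */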
1119 /* No area exists that is outside the allowed domain aperture */ in iopt_table_add_domain()
1227 * iopt_area_split - Split an area into two parts at iova
1228 * @area: The area to split
1229 * @iova: Becomes the last IOVA of a new area
1231 * This splits an area into two. It is part of the VFIO compatibility to allow poking a hole in the mapping. in iopt_area_split()
1235 static int iopt_area_split(struct iopt_area *area, unsigned long iova) in iopt_area_split() argument
1237 unsigned long alignment = area->iopt->iova_alignment; in iopt_area_split()
1238 unsigned long last_iova = iopt_area_last_iova(area); in iopt_area_split()
1239 unsigned long start_iova = iopt_area_iova(area); in iopt_area_split()
1241 struct io_pagetable *iopt = area->iopt; in iopt_area_split()
1242 struct iopt_pages *pages = area->pages; in iopt_area_split()
1252 if (!pages || area->prevent_access) in iopt_area_split()
1256 iopt_area_start_byte(area, new_start) & (alignment - 1)) in iopt_area_split()
1274 if (area->num_accesses) { in iopt_area_split()
1283 if (area->storage_domain && !iopt->disable_large_pages) { in iopt_area_split()
1288 interval_tree_remove(&area->node, &iopt->area_itree); in iopt_area_split()
1289 rc = iopt_insert_area(iopt, lhs, area->pages, start_iova, in iopt_area_split()
1290 iopt_area_start_byte(area, start_iova), in iopt_area_split()
1292 area->iommu_prot); in iopt_area_split()
1296 rc = iopt_insert_area(iopt, rhs, area->pages, new_start, in iopt_area_split()
1297 iopt_area_start_byte(area, new_start), in iopt_area_split()
1298 last_iova - new_start + 1, area->iommu_prot); in iopt_area_split()
1303 * If the original area has filled a domain, domains_itree has to be updated as well. in iopt_area_split()
1306 if (area->storage_domain) { in iopt_area_split()
1307 interval_tree_remove(&area->pages_node, &pages->domains_itree); in iopt_area_split()
1312 lhs->storage_domain = area->storage_domain; in iopt_area_split()
1313 lhs->pages = area->pages; in iopt_area_split()
1314 rhs->storage_domain = area->storage_domain; in iopt_area_split()
1315 rhs->pages = area->pages; in iopt_area_split()
1317 kfree(area); in iopt_area_split()
1329 interval_tree_insert(&area->node, &iopt->area_itree); in iopt_area_split()
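The two iopt_insert_area() calls at 1289 and 1296 carve the original range into adjacent halves that share the original iopt_pages and storage_domain; on failure the original node is simply re-inserted unchanged. A worked example with hypothetical numbers:

	/*
	 * Split an area covering IOVA [0x10000, 0x1ffff], backed by
	 * bytes [0x0, 0xffff] of its iopt_pages, at iova = 0x13fff
	 * (so new_start = 0x14000):
	 *
	 *   lhs: IOVA [0x10000, 0x13fff], start_byte 0x0,    length 0x4000
	 *   rhs: IOVA [0x14000, 0x1ffff], start_byte 0x4000, length 0xc000
	 *
	 * Both halves keep pointing at the same iopt_pages; only the
	 * interval-tree nodes change.
	 */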
1346 struct iopt_area *area; in iopt_cut_iova() local
1348 area = iopt_area_iter_first(iopt, iovas[i], iovas[i]); in iopt_cut_iova()
1349 if (!area) in iopt_cut_iova()
1351 rc = iopt_area_split(area, iovas[i]); in iopt_cut_iova()
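iopt_cut_iova() invokes iopt_area_split() once per requested boundary so that an area edge lands exactly after each iova. A hypothetical sketch of how a VFIO-compat style caller could use it to unmap an arbitrary [start, last] range (the boundary choice, the start > 0 assumption, and the iopt_unmap_iova() signature are assumptions, not taken from the matches above):

	/* make area edges land just before start and at last, then unmap */
	unsigned long iovas[] = { start - 1, last };
	int rc;

	rc = iopt_cut_iova(iopt, iovas, ARRAY_SIZE(iovas));
	if (!rc)
		rc = iopt_unmap_iova(iopt, start, last - start + 1, NULL);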