/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 */
#ifndef __IO_PAGETABLE_H
#define __IO_PAGETABLE_H

#include <linux/dma-buf.h>
#include <linux/interval_tree.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

#include "iommufd_private.h"

struct iommu_domain;

/*
 * Each io_pagetable is composed of intervals of areas which cover regions of
 * the iova that are backed by something. iova not covered by areas is not
 * populated in the page table. Each area is fully populated with pages.
 *
 * iovas are in byte units, but must be iopt->iova_alignment aligned.
 *
 * pages can be NULL; this means some other thread is still working on setting
 * up or tearing down the area. When observed under the write side of the
 * domains_rwsem a NULL pages must mean the area is still being set up and no
 * domains are filled.
 *
 * storage_domain points at an arbitrary iommu_domain that is holding the PFNs
 * for this area. It is locked by the pages->mutex. This simplifies the locking
 * as the pages code can rely on the storage_domain without having to get the
 * iopt->domains_rwsem.
 *
 * The io_pagetable::iova_rwsem protects node
 * The iopt_pages::mutex protects pages_node
 * iopt and iommu_prot are immutable
 * The pages::mutex protects num_accesses
 */
struct iopt_area {
	struct interval_tree_node node;
	struct interval_tree_node pages_node;
	struct io_pagetable *iopt;
	struct iopt_pages *pages;
	struct iommu_domain *storage_domain;
	/* How many bytes into the first page the area starts */
	unsigned int page_offset;
	/* IOMMU_READ, IOMMU_WRITE, etc */
	int iommu_prot;
	bool prevent_access : 1;
	unsigned int num_accesses;
	unsigned int num_locks;
};
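
/*
 * A minimal sketch of the locking rules above (illustrative only; iopt, iova,
 * area and domain are placeholders, and iopt_area_iter_first() is generated
 * further below by __make_iopt_iter()). Area lookup requires the iova_rwsem,
 * and storage_domain may only be dereferenced while holding pages->mutex:
 *
 *	down_read(&iopt->iova_rwsem);
 *	area = iopt_area_iter_first(iopt, iova, iova);
 *	if (area && area->pages) {
 *		mutex_lock(&area->pages->mutex);
 *		domain = area->storage_domain;
 *		mutex_unlock(&area->pages->mutex);
 *	}
 *	up_read(&iopt->iova_rwsem);
 */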

struct iopt_allowed {
	struct interval_tree_node node;
};

struct iopt_reserved {
	struct interval_tree_node node;
	void *owner;
};

int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages);
void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages);

int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain);
void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
			     struct iommu_domain *domain);
void iopt_area_unmap_domain(struct iopt_area *area,
			    struct iommu_domain *domain);

int iopt_dmabuf_track_domain(struct iopt_pages *pages, struct iopt_area *area,
			     struct iommu_domain *domain);
void iopt_dmabuf_untrack_domain(struct iopt_pages *pages,
				struct iopt_area *area,
				struct iommu_domain *domain);
int iopt_dmabuf_track_all_domains(struct iopt_area *area,
				  struct iopt_pages *pages);
void iopt_dmabuf_untrack_all_domains(struct iopt_area *area,
				     struct iopt_pages *pages);

static inline unsigned long iopt_area_index(struct iopt_area *area)
{
	return area->pages_node.start;
}

static inline unsigned long iopt_area_last_index(struct iopt_area *area)
{
	return area->pages_node.last;
}

static inline unsigned long iopt_area_iova(struct iopt_area *area)
{
	return area->node.start;
}

static inline unsigned long iopt_area_last_iova(struct iopt_area *area)
{
	return area->node.last;
}

static inline size_t iopt_area_length(struct iopt_area *area)
{
	return (area->node.last - area->node.start) + 1;
}

/*
 * Number of bytes from the start of the iopt_pages at which the iova begins.
 * iopt_area_start_byte() / PAGE_SIZE encodes the starting page index
 * iopt_area_start_byte() % PAGE_SIZE encodes the offset within that page
 */
static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
						 unsigned long iova)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(iova < iopt_area_iova(area) ||
			iova > iopt_area_last_iova(area));
	return (iova - iopt_area_iova(area)) + area->page_offset +
	       iopt_area_index(area) * PAGE_SIZE;
}

static inline unsigned long iopt_area_iova_to_index(struct iopt_area *area,
						    unsigned long iova)
{
	return iopt_area_start_byte(area, iova) / PAGE_SIZE;
}
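
/*
 * Worked example of the encoding above, assuming PAGE_SIZE is 4096: for an
 * area with page_offset == 256 and iopt_area_index() == 2, an iova 768 bytes
 * past iopt_area_iova() gives
 *
 *	iopt_area_start_byte() == 768 + 256 + 2 * 4096 == 9216
 *
 * so iopt_area_iova_to_index() returns 9216 / 4096 == 2 and the offset within
 * that backing page is 9216 % 4096 == 1024.
 */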

#define __make_iopt_iter(name)                                                 \
	static inline struct iopt_##name *iopt_##name##_iter_first(           \
		struct io_pagetable *iopt, unsigned long start,                \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
									       \
		lockdep_assert_held(&iopt->iova_rwsem);                        \
		node = interval_tree_iter_first(&iopt->name##_itree, start,    \
						last);                         \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);           \
	}                                                                      \
	static inline struct iopt_##name *iopt_##name##_iter_next(            \
		struct iopt_##name *last_node, unsigned long start,            \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
									       \
		node = interval_tree_iter_next(&last_node->node, start, last); \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);           \
	}

__make_iopt_iter(area)
__make_iopt_iter(allowed)
__make_iopt_iter(reserved)
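
/*
 * The generated iterators visit every node whose interval intersects
 * [start, last] and must be called with the iova_rwsem held. A minimal usage
 * sketch (illustrative only; iopt, start, last and process_area() are
 * placeholders):
 *
 *	struct iopt_area *area;
 *
 *	for (area = iopt_area_iter_first(iopt, start, last); area;
 *	     area = iopt_area_iter_next(area, start, last))
 *		process_area(area);
 */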

struct iopt_area_contig_iter {
	unsigned long cur_iova;
	unsigned long last_iova;
	struct iopt_area *area;
};
struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
					struct io_pagetable *iopt,
					unsigned long iova,
					unsigned long last_iova);
struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter);

static inline bool iopt_area_contig_done(struct iopt_area_contig_iter *iter)
{
	return iter->area && iter->last_iova <= iopt_area_last_iova(iter->area);
}

/*
 * Iterate over a contiguous list of areas that span the [iova, last_iova]
 * range. The caller must check iopt_area_contig_done() after the loop to see
 * if contiguous areas covered the entire range.
 */
#define iopt_for_each_contig_area(iter, area, iopt, iova, last_iova)          \
	for (area = iopt_area_contig_init(iter, iopt, iova, last_iova); area; \
	     area = iopt_area_contig_next(iter))
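
/*
 * A minimal sketch of the pattern above, under the iova_rwsem (illustrative
 * only; iopt, iova, last_iova and handle_area() are placeholders, and the
 * errno is just an example):
 *
 *	struct iopt_area_contig_iter iter;
 *	struct iopt_area *area;
 *
 *	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
 *		handle_area(area);
 *	if (!iopt_area_contig_done(&iter))
 *		return -ENOENT;
 */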

enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
	IOPT_PAGES_ACCOUNT_MODE_NUM = 3,
};

enum iopt_address_type {
	IOPT_ADDRESS_USER = 0,
	IOPT_ADDRESS_FILE,
	IOPT_ADDRESS_DMABUF,
};

struct iopt_pages_dmabuf_track {
	struct iommu_domain *domain;
	struct iopt_area *area;
	struct list_head elm;
};

struct iopt_pages_dmabuf {
	struct dma_buf_attachment *attach;
	struct dma_buf_phys_vec phys;
	/* Always PAGE_SIZE aligned */
	unsigned long start;
	struct list_head tracker;
};

/*
 * This holds a pinned page list for multiple areas of IO address space. The
 * pages always originate from a linear chunk of userspace VA. Multiple
 * io_pagetable's, through their iopt_area's, can share a single iopt_pages
 * which avoids multi-pinning and double accounting of page consumption.
 *
 * indexes in this structure are measured in PAGE_SIZE units, are 0-based from
 * the start of the uptr and extend to npages. Pages are pinned dynamically
 * according to the intervals in the access_itree and domains_itree; npinned
 * records the current number of pages pinned.
 */
struct iopt_pages {
	struct kref kref;
	struct mutex mutex;
	size_t npages;
	size_t npinned;
	size_t last_npinned;
	struct task_struct *source_task;
	struct mm_struct *source_mm;
	struct user_struct *source_user;
	enum iopt_address_type type;
	union {
		void __user *uptr;	/* IOPT_ADDRESS_USER */
		struct {		/* IOPT_ADDRESS_FILE */
			struct file *file;
			unsigned long start;
		};
		/* IOPT_ADDRESS_DMABUF */
		struct iopt_pages_dmabuf dmabuf;
	};
	bool writable:1;
	u8 account_mode;

	struct xarray pinned_pfns;
	/* Of iopt_pages_access::node */
	struct rb_root_cached access_itree;
	/* Of iopt_area::pages_node */
	struct rb_root_cached domains_itree;
};

static inline bool iopt_is_dmabuf(struct iopt_pages *pages)
{
	if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
		return false;
	return pages->type == IOPT_ADDRESS_DMABUF;
}

static inline bool iopt_dmabuf_revoked(struct iopt_pages *pages)
{
	lockdep_assert_held(&pages->mutex);
	if (iopt_is_dmabuf(pages))
		return pages->dmabuf.phys.len == 0;
	return false;
}
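
/*
 * A sketch of a typical guard before touching dmabuf backed PFNs
 * (illustrative only; pages is a placeholder and the errno is just an
 * example). The helper asserts pages->mutex is held, so take it first:
 *
 *	mutex_lock(&pages->mutex);
 *	if (iopt_dmabuf_revoked(pages)) {
 *		mutex_unlock(&pages->mutex);
 *		return -ENODEV;
 *	}
 */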

struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
					 unsigned long length, bool writable);
struct iopt_pages *iopt_alloc_file_pages(struct file *file,
					 unsigned long start_byte,
					 unsigned long start,
					 unsigned long length, bool writable);
struct iopt_pages *iopt_alloc_dmabuf_pages(struct iommufd_ctx *ictx,
					   struct dma_buf *dmabuf,
					   unsigned long start_byte,
					   unsigned long start,
					   unsigned long length, bool writable);
void iopt_release_pages(struct kref *kref);
static inline void iopt_put_pages(struct iopt_pages *pages)
{
	kref_put(&pages->kref, iopt_release_pages);
}
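
/*
 * Refcounting sketch (illustrative only; area and pages are placeholders): a
 * caller that shares an existing iopt_pages with another area takes its own
 * reference and pairs it with iopt_put_pages() when the area is destroyed:
 *
 *	kref_get(&pages->kref);
 *	area->pages = pages;
 *	...
 *	area->pages = NULL;
 *	iopt_put_pages(pages);
 */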

void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start,
				 unsigned long last, struct page **out_pages);
int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start,
			   unsigned long last, struct page **out_pages);
void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
			      unsigned long last);

int iopt_area_add_access(struct iopt_area *area, unsigned long start,
			 unsigned long last, struct page **out_pages,
			 unsigned int flags, bool lock_area);
void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
			     unsigned long last, bool unlock_area);
int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
			 void *data, unsigned long length, unsigned int flags);

/*
 * Each interval represents an active iopt_access_pages(); it acts as an
 * interval lock that keeps the PFNs pinned and stored in the xarray.
 */
struct iopt_pages_access {
	struct interval_tree_node node;
	unsigned int users;
};
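
/*
 * Sketch of the interval-lock pattern (illustrative only; area, start, last,
 * out_pages and flags are placeholders): an access pins [start, last] and
 * must be dropped with a matching remove call:
 *
 *	rc = iopt_area_add_access(area, start, last, out_pages, flags, false);
 *	if (rc)
 *		return rc;
 *	... use out_pages ...
 *	iopt_area_remove_access(area, start, last, false);
 */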

struct pfn_reader_user;

int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages,
			     bool inc, struct pfn_reader_user *user);

#endif