1 /* SPDX-License-Identifier: MIT */
2 #ifndef _DRM_PAGEMAP_H_
3 #define _DRM_PAGEMAP_H_
4
5 #include <linux/dma-direction.h>
6 #include <linux/hmm.h>
7 #include <linux/types.h>
8
/* Number of PAGE_SIZE pages covered by a mapping of the given page order. */
#define NR_PAGES(order) (1U << (order))
10
11 struct dma_fence;
12 struct drm_pagemap;
13 struct drm_pagemap_cache;
14 struct drm_pagemap_dev_hold;
15 struct drm_pagemap_zdd;
16 struct device;
17
/**
 * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
 *
 * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
 * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
 */
enum drm_interconnect_protocol {
	DRM_INTERCONNECT_SYSTEM,
	DRM_INTERCONNECT_DRIVER,
	/* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
};
29
/**
 * struct drm_pagemap_addr - Address representation.
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol. One of enum drm_interconnect_protocol,
 * or a driver-private value beyond DRM_INTERCONNECT_DRIVER.
 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction (enum dma_data_direction).
 *
 * Note: There is room for improvement here. We should be able to pack into
 * 64 bits.
 */
struct drm_pagemap_addr {
	dma_addr_t addr;
	u64 proto : 54;
	u64 order : 8;
	u64 dir : 2;
};
46
47 /**
48 * drm_pagemap_addr_encode() - Encode a dma address with metadata
49 * @addr: The dma address or driver-defined address for driver private interconnects.
50 * @proto: The interconnect protocol.
51 * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
52 * @dir: The DMA direction.
53 *
54 * Return: A struct drm_pagemap_addr encoding the above information.
55 */
56 static inline struct drm_pagemap_addr
drm_pagemap_addr_encode(dma_addr_t addr,enum drm_interconnect_protocol proto,unsigned int order,enum dma_data_direction dir)57 drm_pagemap_addr_encode(dma_addr_t addr,
58 enum drm_interconnect_protocol proto,
59 unsigned int order,
60 enum dma_data_direction dir)
61 {
62 return (struct drm_pagemap_addr) {
63 .addr = addr,
64 .proto = proto,
65 .order = order,
66 .dir = dir,
67 };
68 }
69
/**
 * struct drm_pagemap_ops: Ops for a drm-pagemap.
 *
 * Provider-supplied callbacks tying a &struct drm_pagemap to the driver
 * that owns the underlying device-private memory.
 */
struct drm_pagemap_ops {
	/**
	 * @device_map: Map for device access or provide a virtual address
	 * suitable for the interconnect.
	 * (NOTE(review): the original sentence was truncated after
	 * "suitable for" - confirm the intended wording against the
	 * provider implementations.)
	 *
	 * @dpagemap: The struct drm_pagemap for the page.
	 * @dev: The device mapper.
	 * @page: The page to map.
	 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
	 * @dir: The transfer direction.
	 *
	 * Return: A struct drm_pagemap_addr encoding the resulting mapping.
	 */
	struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
					      struct device *dev,
					      struct page *page,
					      unsigned int order,
					      enum dma_data_direction dir);

	/**
	 * @device_unmap: Unmap a device address previously obtained using @device_map.
	 *
	 * @dpagemap: The struct drm_pagemap for the mapping.
	 * @dev: The device unmapper.
	 * @addr: The device address obtained when mapping.
	 */
	void (*device_unmap)(struct drm_pagemap *dpagemap,
			     struct device *dev,
			     const struct drm_pagemap_addr *addr);

	/**
	 * @populate_mm: Populate part of the mm with @dpagemap memory,
	 * migrating existing data.
	 * @dpagemap: The struct drm_pagemap managing the memory.
	 * @start: The virtual start address in @mm
	 * @end: The virtual end address in @mm
	 * @mm: Pointer to a live mm. The caller must have an mmget()
	 * reference.
	 * @timeslice_ms: The time requested for the migrated pages to be
	 * present in @mm before being allowed to be migrated back.
	 *
	 * The caller will have the mm lock at least in read mode.
	 * Note that there is no guarantee that the memory is resident
	 * after the function returns, it's best effort only.
	 * When the mm is not using the memory anymore,
	 * it will be released. The struct drm_pagemap might have a
	 * mechanism in place to reclaim the memory and the data will
	 * then be migrated. Typically to system memory.
	 * The implementation should hold sufficient runtime power-
	 * references while pages are used in an address space and
	 * should ideally guard against hardware device unbind in
	 * a way such that device pages are migrated back to system
	 * followed by device page removal. The implementation should
	 * return -ENODEV after device removal.
	 *
	 * Return: 0 if successful. Negative error code on error.
	 */
	int (*populate_mm)(struct drm_pagemap *dpagemap,
			   unsigned long start, unsigned long end,
			   struct mm_struct *mm,
			   unsigned long timeslice_ms);

	/**
	 * @destroy: Destroy the drm_pagemap and associated resources.
	 * @dpagemap: The drm_pagemap to destroy.
	 * @is_atomic_or_reclaim: The function may be called from
	 * atomic- or reclaim context.
	 *
	 * The implementation should take care not to attempt to
	 * destroy resources that may already have been destroyed
	 * using devm_ callbacks, since this function may be called
	 * after the underlying struct device has been unbound.
	 * If the implementation defers the execution to a work item
	 * to avoid locking issues, then it must make sure the work
	 * items are flushed before module exit. If the destroy call
	 * happens after the provider's pci_remove() callback has
	 * been executed, a module reference and drm device reference is
	 * held across the destroy callback.
	 */
	void (*destroy)(struct drm_pagemap *dpagemap,
			bool is_atomic_or_reclaim);
};
149
/**
 * struct drm_pagemap: Additional information for a struct dev_pagemap
 * used for device p2p handshaking.
 * @ops: The struct drm_pagemap_ops.
 * @ref: Reference count. Manipulate via drm_pagemap_get() /
 * drm_pagemap_put().
 * @drm: The struct drm device owning the device-private memory.
 * @pagemap: Pointer to the underlying dev_pagemap.
 * @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
 * device referencing.
 * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
 * &struct drm_pagemap. May be NULL if no cache is used.
 * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
 * used if also using a pagemap cache.
 */
struct drm_pagemap {
	const struct drm_pagemap_ops *ops;
	struct kref ref;
	struct drm_device *drm;
	struct dev_pagemap *pagemap;
	struct drm_pagemap_dev_hold *dev_hold;
	struct drm_pagemap_cache *cache;
	struct list_head shrink_link;
};
173
174 struct drm_pagemap_devmem;
175
/**
 * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. These operations are provided by the GPU driver to manage device memory
 * allocations and perform operations such as migration between device memory and system
 * RAM.
 */
struct drm_pagemap_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release device memory allocation and drop a reference to device
	 * memory allocation.
	 */
	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @pagemap_addr: Pointer to array of DMA information (source)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before migration start.
	 * May be NULL.
	 *
	 * Copy pages to device memory. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages,
			      struct dma_fence *pre_migrate_fence);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @pagemap_addr: Pointer to array of DMA information (destination)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before migration start.
	 * May be NULL.
	 *
	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   struct drm_pagemap_addr *pagemap_addr,
			   unsigned long npages,
			   struct dma_fence *pre_migrate_fence);
};
245
#if IS_ENABLED(CONFIG_ZONE_DEVICE)

/* Core drm_pagemap init, lookup and refcounting API. */

int drm_pagemap_init(struct drm_pagemap *dpagemap,
		     struct dev_pagemap *pagemap,
		     struct drm_device *drm,
		     const struct drm_pagemap_ops *ops);

struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
				       struct dev_pagemap *pagemap,
				       const struct drm_pagemap_ops *ops);

struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);

void drm_pagemap_put(struct drm_pagemap *dpagemap);
261 #else
262
/* Without CONFIG_ZONE_DEVICE there are no device-private pages to look up. */
static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	return NULL;
}
267
/* No-op stub: drm_pagemap refcounting only exists with CONFIG_ZONE_DEVICE. */
static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
{
}
271
272 #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
273
274 /**
275 * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
276 * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
277 *
278 * Return: Pointer to the struct drm_pagemap, or NULL.
279 */
280 static inline struct drm_pagemap *
drm_pagemap_get(struct drm_pagemap * dpagemap)281 drm_pagemap_get(struct drm_pagemap *dpagemap)
282 {
283 if (likely(dpagemap))
284 kref_get(&dpagemap->ref);
285
286 return dpagemap;
287 }
288
289 /**
290 * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
291 * unless the current reference count is zero.
292 * @dpagemap: Pointer to the drm_pagemap or NULL.
293 *
294 * Return: A pointer to @dpagemap if the reference count was successfully
295 * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
296 */
297 static inline struct drm_pagemap * __must_check
drm_pagemap_get_unless_zero(struct drm_pagemap * dpagemap)298 drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
299 {
300 return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
301 }
302
/**
 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure which device memory allocation belongs to
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion signalled when the device memory allocation is
 * detached from device pages
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
 * (May be NULL).
 */
struct drm_pagemap_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_pagemap_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
	struct dma_fence *pre_migrate_fence;
};
326
/**
 * struct drm_pagemap_migrate_details - Details to govern migration.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in the mm before being allowed to be migrated back.
 * @can_migrate_same_pagemap: Whether the copy function, as indicated by
 * the @source_peer_migrates flag, can migrate device pages within a
 * single drm_pagemap.
 * @source_peer_migrates: Whether, on p2p migration, the source drm_pagemap
 * should use the copy_to_ram() callback rather than the destination
 * drm_pagemap using the copy_to_devmem() callback.
 */
struct drm_pagemap_migrate_details {
	unsigned long timeslice_ms;
	u32 can_migrate_same_pagemap : 1;
	u32 source_peer_migrates : 1;
};
343
#if IS_ENABLED(CONFIG_ZONE_DEVICE)

/* Migration helpers between device memory and system RAM. */

int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  const struct drm_pagemap_migrate_details *mdetails);

int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);

void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size,
			     struct dma_fence *pre_migrate_fence);

int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms);

void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);

int drm_pagemap_reinit(struct drm_pagemap *dpagemap);

#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
371
372 #endif
373