xref: /linux/include/drm/drm_pagemap.h (revision 2e03c0c5c59a086df534e15ddde03cb33bc475c4)
1 /* SPDX-License-Identifier: MIT */
2 #ifndef _DRM_PAGEMAP_H_
3 #define _DRM_PAGEMAP_H_
4 
5 #include <linux/dma-direction.h>
6 #include <linux/hmm.h>
7 #include <linux/memremap.h>
8 #include <linux/types.h>
9 
/* Number of PAGE_SIZE pages spanned by a mapping of the given page order.
 * Note: evaluates in unsigned int; @order must be < 32.
 */
#define NR_PAGES(order) (1U << (order))
11 
12 struct dma_fence;
13 struct drm_pagemap;
14 struct drm_pagemap_cache;
15 struct drm_pagemap_dev_hold;
16 struct drm_pagemap_zdd;
17 struct device;
18 
/**
 * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
 *
 * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
 * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
 */
enum drm_interconnect_protocol {
	DRM_INTERCONNECT_SYSTEM,
	DRM_INTERCONNECT_DRIVER,
	/* Drivers may define private protocol values beyond DRM_INTERCONNECT_DRIVER */
};
30 
/**
 * struct drm_pagemap_addr - Address representation.
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol (enum drm_interconnect_protocol or a
 * driver-private value beyond DRM_INTERCONNECT_DRIVER).
 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction (enum dma_data_direction).
 *
 * Note: There is room for improvement here. We should be able to pack into
 * 64 bits; currently the metadata bitfields occupy a u64 in addition to
 * @addr.
 */
struct drm_pagemap_addr {
	dma_addr_t addr;
	u64 proto : 54;
	u64 order : 8;
	u64 dir : 2;
};
47 
48 /**
49  * drm_pagemap_addr_encode() - Encode a dma address with metadata
50  * @addr: The dma address or driver-defined address for driver private interconnects.
51  * @proto: The interconnect protocol.
52  * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
53  * @dir: The DMA direction.
54  *
55  * Return: A struct drm_pagemap_addr encoding the above information.
56  */
57 static inline struct drm_pagemap_addr
58 drm_pagemap_addr_encode(dma_addr_t addr,
59 			enum drm_interconnect_protocol proto,
60 			unsigned int order,
61 			enum dma_data_direction dir)
62 {
63 	return (struct drm_pagemap_addr) {
64 		.addr = addr,
65 		.proto = proto,
66 		.order = order,
67 		.dir = dir,
68 	};
69 }
70 
/**
 * struct drm_pagemap_ops: Ops for a drm-pagemap.
 */
struct drm_pagemap_ops {
	/**
	 * @device_map: Map a page for access by @dev.
	 *
	 * @dpagemap: The struct drm_pagemap for the page.
	 * @dev: The device mapper.
	 * @page: The page to map.
	 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
	 * @dir: The transfer direction.
	 *
	 * Return: A struct drm_pagemap_addr encoding the mapped address
	 * together with the protocol, order and DMA direction used.
	 */
	struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
					      struct device *dev,
					      struct page *page,
					      unsigned int order,
					      enum dma_data_direction dir);

	/**
	 * @device_unmap: Unmap a device address previously obtained using @device_map.
	 *
	 * @dpagemap: The struct drm_pagemap for the mapping.
	 * @dev: The device unmapper.
	 * @addr: The device address obtained when mapping.
	 */
	void (*device_unmap)(struct drm_pagemap *dpagemap,
			     struct device *dev,
			     const struct drm_pagemap_addr *addr);

	/**
	 * @populate_mm: Populate part of the mm with @dpagemap memory,
	 * migrating existing data.
	 * @dpagemap: The struct drm_pagemap managing the memory.
	 * @start: The virtual start address in @mm
	 * @end: The virtual end address in @mm
	 * @mm: Pointer to a live mm. The caller must have an mmget()
	 * reference.
	 * @timeslice_ms: The time requested for the migrated pagemap pages to
	 * be present in @mm before being allowed to be migrated back.
	 *
	 * The caller will have the mm lock at least in read mode.
	 * Note that there is no guarantee that the memory is resident
	 * after the function returns, it's best effort only.
	 * When the mm is not using the memory anymore,
	 * it will be released. The struct drm_pagemap might have a
	 * mechanism in place to reclaim the memory and the data will
	 * then be migrated. Typically to system memory.
	 * The implementation should hold sufficient runtime power-
	 * references while pages are used in an address space and
	 * should ideally guard against hardware device unbind in
	 * a way such that device pages are migrated back to system
	 * followed by device page removal. The implementation should
	 * return -ENODEV after device removal.
	 *
	 * Return: 0 if successful. Negative error code on error.
	 */
	int (*populate_mm)(struct drm_pagemap *dpagemap,
			   unsigned long start, unsigned long end,
			   struct mm_struct *mm,
			   unsigned long timeslice_ms);
	/**
	 * @destroy: Destroy the drm_pagemap and associated resources.
	 * @dpagemap: The drm_pagemap to destroy.
	 * @is_atomic_or_reclaim: The function may be called from
	 * atomic- or reclaim context.
	 *
	 * The implementation should take care not to attempt to
	 * destroy resources that may already have been destroyed
	 * using devm_ callbacks, since this function may be called
	 * after the underlying struct device has been unbound.
	 * If the implementation defers the execution to a work item
	 * to avoid locking issues, then it must make sure the work
	 * items are flushed before module exit. If the destroy call
	 * happens after the provider's pci_remove() callback has
	 * been executed, a module reference and drm device reference is
	 * held across the destroy callback.
	 */
	void (*destroy)(struct drm_pagemap *dpagemap,
			bool is_atomic_or_reclaim);
};
150 
/**
 * struct drm_pagemap: Additional information for a struct dev_pagemap
 * used for device p2p handshaking.
 * @ops: The struct drm_pagemap_ops.
 * @ref: Reference count.
 * @drm: The struct drm device owning the device-private memory.
 * @pagemap: Pointer to the underlying dev_pagemap.
 * @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
 * device referencing.
 * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
 * &struct drm_pagemap. May be NULL if no cache is used.
 * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
 * used if also using a pagemap cache.
 */
struct drm_pagemap {
	const struct drm_pagemap_ops *ops;
	struct kref ref;
	struct drm_device *drm;
	struct dev_pagemap *pagemap;
	struct drm_pagemap_dev_hold *dev_hold;
	struct drm_pagemap_cache *cache;
	struct list_head shrink_link;
};
174 
175 struct drm_pagemap_devmem;
176 
/**
 * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. These operations are provided by the GPU driver to manage device memory
 * allocations and perform operations such as migration between device memory and system
 * RAM.
 */
struct drm_pagemap_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release device memory allocation and drop a reference to device
	 * memory allocation.
	 */
	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @pagemap_addr: Pointer to array of DMA information (source)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before migration start.
	 * May be NULL.
	 *
	 * Copy pages to device memory. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages,
			      struct dma_fence *pre_migrate_fence);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @pagemap_addr: Pointer to array of DMA information (destination)
	 * @npages: Number of pages to copy
	 * @pre_migrate_fence: dma-fence to wait for before migration start.
	 * May be NULL.
	 *
	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   struct drm_pagemap_addr *pagemap_addr,
			   unsigned long npages,
			   struct dma_fence *pre_migrate_fence);
};
246 
#if IS_ENABLED(CONFIG_ZONE_DEVICE)

int drm_pagemap_init(struct drm_pagemap *dpagemap,
		     struct dev_pagemap *pagemap,
		     struct drm_device *drm,
		     const struct drm_pagemap_ops *ops);

struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
				       struct dev_pagemap *pagemap,
				       const struct drm_pagemap_ops *ops);

struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);

void drm_pagemap_put(struct drm_pagemap *dpagemap);

#else

/* Without ZONE_DEVICE there are no device-private pages, so no drm_pagemap. */
static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	return NULL;
}

/* No-op: references are only taken when ZONE_DEVICE is enabled. */
static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
{
}

#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
274 
275 /**
276  * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
277  * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
278  *
279  * Return: Pointer to the struct drm_pagemap, or NULL.
280  */
281 static inline struct drm_pagemap *
282 drm_pagemap_get(struct drm_pagemap *dpagemap)
283 {
284 	if (likely(dpagemap))
285 		kref_get(&dpagemap->ref);
286 
287 	return dpagemap;
288 }
289 
290 /**
291  * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
292  * unless the current reference count is zero.
293  * @dpagemap: Pointer to the drm_pagemap or NULL.
294  *
295  * Return: A pointer to @dpagemap if the reference count was successfully
296  * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
297  */
298 static inline struct drm_pagemap * __must_check
299 drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
300 {
301 	return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
302 }
303 
/**
 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure which device memory allocation belongs to
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion used to signal/wait for the allocation being detached
 * from device pages (NOTE(review): exact signalling point is in the provider —
 * confirm against drm_pagemap.c)
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
 * (May be NULL).
 */
struct drm_pagemap_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_pagemap_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
	struct dma_fence *pre_migrate_fence;
};
327 
/**
 * struct drm_pagemap_migrate_details - Details to govern migration.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 * @can_migrate_same_pagemap: Whether the copy function as indicated by
 * the @source_peer_migrates flag, can migrate device pages within a
 * single drm_pagemap.
 * @source_peer_migrates: Whether, on p2p migration, the source
 * drm_pagemap's copy_to_ram() callback is used rather than the
 * destination drm_pagemap's copy_to_devmem() callback.
 */
struct drm_pagemap_migrate_details {
	unsigned long timeslice_ms;
	u32 can_migrate_same_pagemap : 1;
	u32 source_peer_migrates : 1;
};
344 
#if IS_ENABLED(CONFIG_ZONE_DEVICE)

/* Migration and lifetime helpers, available only with CONFIG_ZONE_DEVICE. */
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  const struct drm_pagemap_migrate_details *mdetails);

int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);

void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size,
			     struct dma_fence *pre_migrate_fence);

int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms);

void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);

int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
370 
/**
 * drm_pagemap_page_zone_device_data() - Page to zone_device_data
 * @page: Pointer to the page
 *
 * Looks up the zone-device private data via @page's folio.
 *
 * Return: Page's zone_device_data
 */
static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
{
	return folio_zone_device_data(page_folio(page));
}
383 
#else

/* Stub for builds without ZONE_DEVICE: no device-private pages exist. */
static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
{
	return NULL;
}

#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
392 
393 #endif
394