/* SPDX-License-Identifier: MIT */
#ifndef _DRM_PAGEMAP_H_
#define _DRM_PAGEMAP_H_

#include <linux/dma-direction.h>
#include <linux/hmm.h>
#include <linux/types.h>

#define NR_PAGES(order) (1U << (order))

struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;

/**
 * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
 *
 * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
 * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
 */
enum drm_interconnect_protocol {
	DRM_INTERCONNECT_SYSTEM,
	DRM_INTERCONNECT_DRIVER,
	/* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
};

/**
 * struct drm_pagemap_addr - Address representation.
 * @addr: The DMA address or driver-defined address for driver-private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Note: There is room for improvement here. We should be able to pack into
 * 64 bits.
 */
struct drm_pagemap_addr {
	dma_addr_t addr;
	u64 proto : 54;
	u64 order : 8;
	u64 dir : 2;
};

/**
 * drm_pagemap_addr_encode() - Encode a DMA address with metadata
 * @addr: The DMA address or driver-defined address for driver-private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the DMA mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Return: A struct drm_pagemap_addr encoding the above information.
 */
static inline struct drm_pagemap_addr
drm_pagemap_addr_encode(dma_addr_t addr,
			enum drm_interconnect_protocol proto,
			unsigned int order,
			enum dma_data_direction dir)
{
	return (struct drm_pagemap_addr) {
		.addr = addr,
		.proto = proto,
		.order = order,
		.dir = dir,
	};
}
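
/*
 * Illustrative sketch (not part of this header's API): encoding a freshly
 * created system-page DMA mapping. The variables dma_dev, page and order
 * are hypothetical driver state.
 *
 *	dma_addr_t daddr;
 *	struct drm_pagemap_addr paddr;
 *
 *	daddr = dma_map_page(dma_dev, page, 0, PAGE_SIZE << order,
 *			     DMA_BIDIRECTIONAL);
 *	if (dma_mapping_error(dma_dev, daddr))
 *		return -EIO;
 *
 *	paddr = drm_pagemap_addr_encode(daddr, DRM_INTERCONNECT_SYSTEM,
 *					order, DMA_BIDIRECTIONAL);
 */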

/**
 * struct drm_pagemap_ops - Ops for a drm-pagemap.
 */
struct drm_pagemap_ops {
	/**
	 * @device_map: Map for device access or provide a virtual address
	 * suitable for access over a driver-private interconnect.
	 *
	 * @dpagemap: The struct drm_pagemap for the page.
	 * @dev: The device mapper.
	 * @page: The page to map.
	 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
	 * @dir: The transfer direction.
	 */
	struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
					      struct device *dev,
					      struct page *page,
					      unsigned int order,
					      enum dma_data_direction dir);

	/**
	 * @device_unmap: Unmap a device address previously obtained using @device_map.
	 *
	 * @dpagemap: The struct drm_pagemap for the mapping.
	 * @dev: The device unmapper.
	 * @addr: The device address obtained when mapping.
	 */
	void (*device_unmap)(struct drm_pagemap *dpagemap,
			     struct device *dev,
			     struct drm_pagemap_addr addr);

	/**
	 * @populate_mm: Populate part of the mm with @dpagemap memory,
	 * migrating existing data.
	 * @dpagemap: The struct drm_pagemap managing the memory.
	 * @start: The virtual start address in @mm.
	 * @end: The virtual end address in @mm.
	 * @mm: Pointer to a live mm. The caller must have an mmget()
	 * reference.
	 * @timeslice_ms: The time, in milliseconds, for which the migrated
	 * pages are expected to remain present in @mm before migration
	 * back is allowed.
	 *
	 * The caller will have the mm lock at least in read mode.
	 * Note that there is no guarantee that the memory is resident
	 * after the function returns; it's best effort only.
	 * When the mm is not using the memory anymore, it will be
	 * released. The struct drm_pagemap might have a mechanism in
	 * place to reclaim the memory, and the data will then be
	 * migrated, typically to system memory.
	 * The implementation should hold sufficient runtime power
	 * references while pages are used in an address space and
	 * should ideally guard against hardware device unbind in
	 * a way such that device pages are migrated back to system
	 * memory followed by device page removal. The implementation
	 * should return -ENODEV after device removal.
	 *
	 * Return: 0 if successful. Negative error code on error.
	 */
	int (*populate_mm)(struct drm_pagemap *dpagemap,
			   unsigned long start, unsigned long end,
			   struct mm_struct *mm,
			   unsigned long timeslice_ms);
};
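
/*
 * A minimal, hedged sketch of a &struct drm_pagemap_ops implementation for
 * the simple case where the device mapping is an ordinary system DMA
 * mapping; purely illustrative, not a reference implementation. All foo_*
 * names are made up.
 *
 *	static struct drm_pagemap_addr
 *	foo_device_map(struct drm_pagemap *dpagemap, struct device *dev,
 *		       struct page *page, unsigned int order,
 *		       enum dma_data_direction dir)
 *	{
 *		dma_addr_t daddr = dma_map_page(dev, page, 0,
 *						PAGE_SIZE << order, dir);
 *
 *		return drm_pagemap_addr_encode(daddr, DRM_INTERCONNECT_SYSTEM,
 *					       order, dir);
 *	}
 *
 *	static void foo_device_unmap(struct drm_pagemap *dpagemap,
 *				     struct device *dev,
 *				     struct drm_pagemap_addr addr)
 *	{
 *		dma_unmap_page(dev, addr.addr, PAGE_SIZE << addr.order,
 *			       addr.dir);
 *	}
 *
 *	static const struct drm_pagemap_ops foo_pagemap_ops = {
 *		.device_map = foo_device_map,
 *		.device_unmap = foo_device_unmap,
 *	};
 */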

/**
 * struct drm_pagemap - Additional information for a struct dev_pagemap
 * used for device p2p handshaking.
 * @ops: The struct drm_pagemap_ops.
 * @dev: The struct device owning the device-private memory.
 */
struct drm_pagemap {
	const struct drm_pagemap_ops *ops;
	struct device *dev;
};
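
/*
 * Illustrative only: a driver would typically tie the pagemap to its device
 * at probe time, pairing it with an ops table like the hypothetical
 * foo_pagemap_ops above.
 *
 *	foo->dpagemap.ops = &foo_pagemap_ops;
 *	foo->dpagemap.dev = drm->dev;
 */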

struct drm_pagemap_devmem;

/**
 * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. These operations are provided by the GPU driver to manage device memory
 * allocations and perform operations such as migration between device memory and system
 * RAM.
 */
struct drm_pagemap_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release the device memory allocation and drop a reference to it.
	 */
	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @pagemap_addr: Pointer to array of DMA information (source)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to device memory. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @pagemap_addr: Pointer to array of DMA information (destination)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to system RAM. If the order of a @pagemap_addr entry
	 * is greater than 0, the entry is populated but subsequent entries
	 * within the range of that order are not populated.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   struct drm_pagemap_addr *pagemap_addr,
			   unsigned long npages);
};
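
/*
 * Because a @pagemap_addr entry with order > 0 covers NR_PAGES(order) pages
 * while the subsequent entries in that range stay unpopulated,
 * implementations of @copy_to_devmem and @copy_to_ram typically advance by
 * each entry's order. A hypothetical skeleton, where foo_copy_chunk() is a
 * made-up helper that issues the actual copy:
 *
 *	static int foo_copy_to_devmem(struct page **pages,
 *				      struct drm_pagemap_addr *pagemap_addr,
 *				      unsigned long npages)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < npages; i += NR_PAGES(pagemap_addr[i].order)) {
 *			int err = foo_copy_chunk(pages[i], pagemap_addr[i]);
 *
 *			if (err)
 *				return err;
 *		}
 *
 *		return 0;
 *	}
 */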

/**
 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion signaled when the device memory allocation is detached
 * from device pages
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 */
struct drm_pagemap_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_pagemap_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
};

int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner);

int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);

struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);

void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size);
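
/*
 * Hypothetical initialization sketch: struct foo_devmem (instance fdm),
 * foo_devmem_ops and foo_vram_chunk are made-up driver objects embedding
 * and implementing the structures above.
 *
 *	struct foo_devmem {
 *		struct drm_pagemap_devmem devmem;
 *		struct foo_vram_chunk *backing;
 *	};
 *
 *	drm_pagemap_devmem_init(&fdm->devmem, drm->dev, mm,
 *				&foo_devmem_ops, dpagemap, end - start);
 */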

int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms);
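
/*
 * Hedged sketch of how a driver might implement the &drm_pagemap_ops
 * @populate_mm hook on top of the helpers above: allocate device memory,
 * then migrate the range into it. foo_alloc_devmem() and foo_pgmap_owner()
 * are made-up helpers; the returned object is assumed to embed a struct
 * drm_pagemap_devmem set up with drm_pagemap_devmem_init().
 *
 *	static int foo_populate_mm(struct drm_pagemap *dpagemap,
 *				   unsigned long start, unsigned long end,
 *				   struct mm_struct *mm,
 *				   unsigned long timeslice_ms)
 *	{
 *		struct foo_devmem *fdm = foo_alloc_devmem(dpagemap, end - start);
 *
 *		if (IS_ERR(fdm))
 *			return PTR_ERR(fdm);
 *
 *		return drm_pagemap_migrate_to_devmem(&fdm->devmem, mm, start,
 *						     end, timeslice_ms,
 *						     foo_pgmap_owner(dpagemap));
 *	}
 */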

#endif