xref: /linux/drivers/gpu/drm/imagination/pvr_gem.c (revision d231cde7c84359fb18fb268cf6cff03b5bce48ff)
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_vm.h"

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include <linux/compiler.h>
#include <linux/compiler_attributes.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iosys-map.h>
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>

static void pvr_gem_object_free(struct drm_gem_object *obj)
{
	drm_gem_shmem_object_free(obj);
}

static int pvr_gem_mmap(struct drm_gem_object *gem_obj, struct vm_area_struct *vma)
{
	struct pvr_gem_object *pvr_obj = gem_to_pvr_gem(gem_obj);
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);

	if (!(pvr_obj->flags & DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS))
		return -EINVAL;

	return drm_gem_shmem_mmap(shmem_obj, vma);
}

static const struct drm_gem_object_funcs pvr_gem_object_funcs = {
	.free = pvr_gem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = pvr_gem_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * pvr_gem_object_flags_validate() - Verify that a collection of PowerVR GEM
 * mapping and/or creation flags forms a valid combination.
 * @flags: PowerVR GEM mapping/creation flags to validate.
 *
 * This function explicitly allows kernel-only flags. All ioctl entrypoints
 * should do their own validation as well as relying on this function.
 *
 * Return:
 *  * %true if @flags contains valid mapping and/or creation flags, or
 *  * %false otherwise.
 */
static bool
pvr_gem_object_flags_validate(u64 flags)
{
	static const u64 invalid_combinations[] = {
		/*
		 * Memory flagged as PM/FW-protected cannot be mapped to
		 * userspace. To make this explicit, we require that the two
		 * flags allowing each of these respective features are never
		 * specified together.
		 */
		(DRM_PVR_BO_PM_FW_PROTECT |
		 DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS),
	};

	/*
	 * Check for bits set in undefined regions. Reserved regions refer to
	 * options that can only be set by the kernel. These are explicitly
	 * allowed in most cases, and must be checked specifically in IOCTL
	 * callback code.
	 */
	if ((flags & PVR_BO_UNDEFINED_MASK) != 0)
		return false;

	/*
	 * Check for all combinations of flags marked as invalid in the array
	 * above.
	 */
	for (int i = 0; i < ARRAY_SIZE(invalid_combinations); ++i) {
		u64 combo = invalid_combinations[i];

		if ((flags & combo) == combo)
			return false;
	}

	return true;
}
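
/*
 * Illustrative example (not part of the driver): given the invalid
 * combination listed above, either flag alone validates, but the pair is
 * rejected. Note the function is static, so this is only callable from
 * within this file.
 *
 *	bool a = pvr_gem_object_flags_validate(DRM_PVR_BO_PM_FW_PROTECT);
 *	bool b = pvr_gem_object_flags_validate(DRM_PVR_BO_PM_FW_PROTECT |
 *					       DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
 *
 * Here a is true and b is false.
 */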

/**
 * pvr_gem_object_into_handle() - Convert a reference to an object into a
 * userspace-accessible handle.
 * @pvr_obj: [IN] Target PowerVR-specific object.
 * @pvr_file: [IN] File to associate the handle with.
 * @handle: [OUT] Pointer to store the created handle in. Remains unmodified if
 * an error is encountered.
 *
 * If an error is encountered, ownership of @pvr_obj will not have been
 * transferred. If this function succeeds, however, further use of @pvr_obj is
 * considered undefined behaviour unless another reference to it is explicitly
 * held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error encountered while attempting to allocate a handle on @pvr_file.
 */
int
pvr_gem_object_into_handle(struct pvr_gem_object *pvr_obj,
			   struct pvr_file *pvr_file, u32 *handle)
{
	struct drm_gem_object *gem_obj = gem_from_pvr_gem(pvr_obj);
	struct drm_file *file = from_pvr_file(pvr_file);

	u32 new_handle;
	int err;

	err = drm_gem_handle_create(file, gem_obj, &new_handle);
	if (err)
		return err;

	/*
	 * Release our reference to @pvr_obj, effectively transferring
	 * ownership to the handle.
	 */
	pvr_gem_object_put(pvr_obj);

	/*
	 * Do not store the new handle in @handle until no more errors can
	 * occur.
	 */
	*handle = new_handle;

	return 0;
}
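
/*
 * Usage sketch (illustrative, not part of this file): a typical BO-create
 * ioctl path pairs pvr_gem_object_create() with this function; pvr_dev,
 * pvr_file, size and flags are assumed to come from a pre-validated ioctl.
 * On success the handle owns the reference, so pvr_obj must not be touched
 * again; on failure the caller still owns it and must put it.
 *
 *	struct pvr_gem_object *pvr_obj;
 *	u32 handle;
 *	int err;
 *
 *	pvr_obj = pvr_gem_object_create(pvr_dev, size, flags);
 *	if (IS_ERR(pvr_obj))
 *		return PTR_ERR(pvr_obj);
 *
 *	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &handle);
 *	if (err) {
 *		pvr_gem_object_put(pvr_obj);
 *		return err;
 *	}
 *	// pvr_obj is no longer safe to dereference here.
 */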

/**
 * pvr_gem_object_from_handle() - Obtain a reference to an object from a
 * userspace handle.
 * @pvr_file: PowerVR-specific file to which @handle is associated.
 * @handle: Userspace handle referencing the target object.
 *
 * On return, @handle always maintains its reference to the requested object
 * (if it had one in the first place). If this function succeeds, the returned
 * object will hold an additional reference. When the caller is finished with
 * the returned object, they should call pvr_gem_object_put() on it to release
 * this reference.
 *
 * Return:
 *  * A pointer to the requested PowerVR-specific object on success, or
 *  * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_gem_object_from_handle(struct pvr_file *pvr_file, u32 handle)
{
	struct drm_file *file = from_pvr_file(pvr_file);
	struct drm_gem_object *gem_obj;

	gem_obj = drm_gem_object_lookup(file, handle);
	if (!gem_obj)
		return NULL;

	return gem_to_pvr_gem(gem_obj);
}
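
/*
 * Usage sketch (illustrative): a successful lookup takes an extra reference
 * that the caller must drop; pvr_file and handle are assumed to come from an
 * ioctl.
 *
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_gem_object_from_handle(pvr_file, handle);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *
 *	// ... use pvr_obj ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */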

/**
 * pvr_gem_object_vmap() - Map a PowerVR GEM object into CPU virtual address
 * space.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * Once the caller is finished with the CPU mapping, they must call
 * pvr_gem_object_vunmap() on @pvr_obj.
 *
 * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_cpu() is called to make
 * sure the CPU mapping is consistent.
 *
 * Return:
 *  * A pointer to the CPU mapping on success,
 *  * -%ENOMEM if the mapping fails, or
 *  * Any error encountered while attempting to acquire a reference to the
 *    backing pages for @pvr_obj.
 */
void *
pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	struct iosys_map map;
	int err;

	dma_resv_lock(obj->resv, NULL);

	err = drm_gem_shmem_vmap_locked(shmem_obj, &map);
	if (err)
		goto err_unlock;

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
		 * in GPU space yet.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_cpu(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	dma_resv_unlock(obj->resv);

	return map.vaddr;

err_unlock:
	dma_resv_unlock(obj->resv);

	return ERR_PTR(err);
}

/**
 * pvr_gem_object_vunmap() - Unmap a PowerVR memory object from CPU virtual
 * address space.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * If @pvr_obj is CPU-cached, dma_sync_sgtable_for_device() is called to make
 * sure the GPU mapping is consistent.
 */
void
pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(shmem_obj->vaddr);
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);

	if (WARN_ON(!map.vaddr))
		return;

	dma_resv_lock(obj->resv, NULL);

	if (pvr_obj->flags & PVR_BO_CPU_CACHED) {
		struct device *dev = shmem_obj->base.dev->dev;

		/* If shmem_obj->sgt is NULL, that means the buffer hasn't been mapped
		 * in GPU space yet.
		 */
		if (shmem_obj->sgt)
			dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
	}

	drm_gem_shmem_vunmap_locked(shmem_obj, &map);

	dma_resv_unlock(obj->resv);
}
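
/*
 * Usage sketch (illustrative): CPU access is always a vmap/vunmap pair; the
 * cache maintenance for PVR_BO_CPU_CACHED objects is handled internally by
 * the two helpers above. The data/data_size names are placeholders.
 *
 *	void *cpu_ptr;
 *
 *	cpu_ptr = pvr_gem_object_vmap(pvr_obj);
 *	if (IS_ERR(cpu_ptr))
 *		return PTR_ERR(cpu_ptr);
 *
 *	memcpy(cpu_ptr, data, data_size);
 *
 *	pvr_gem_object_vunmap(pvr_obj);
 */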

/**
 * pvr_gem_object_zero() - Zeroes the physical memory behind an object.
 * @pvr_obj: Target PowerVR GEM object.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error encountered while attempting to map @pvr_obj to the CPU (see
 *    pvr_gem_object_vmap()).
 */
static int
pvr_gem_object_zero(struct pvr_gem_object *pvr_obj)
{
	void *cpu_ptr;

	cpu_ptr = pvr_gem_object_vmap(pvr_obj);
	if (IS_ERR(cpu_ptr))
		return PTR_ERR(cpu_ptr);

	memset(cpu_ptr, 0, pvr_gem_object_size(pvr_obj));

	/* Make sure the zeroing is done before vunmapping the object. */
	wmb();

	pvr_gem_object_vunmap(pvr_obj);

	return 0;
}

/**
 * pvr_gem_create_object() - Allocate and pre-initialize a pvr_gem_object
 * @drm_dev: DRM device creating this object.
 * @size: Size of the object to allocate in bytes.
 *
 * Return:
 *  * The new pre-initialized GEM object on success, or
 *  * -%ENOMEM if the allocation fails.
 */
struct drm_gem_object *pvr_gem_create_object(struct drm_device *drm_dev, size_t size)
{
	struct drm_gem_object *gem_obj;
	struct pvr_gem_object *pvr_obj;

	pvr_obj = kzalloc(sizeof(*pvr_obj), GFP_KERNEL);
	if (!pvr_obj)
		return ERR_PTR(-ENOMEM);

	gem_obj = gem_from_pvr_gem(pvr_obj);
	gem_obj->funcs = &pvr_gem_object_funcs;

	return gem_obj;
}
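
/*
 * Wiring sketch (illustrative): this hook is not called directly. The DRM
 * core invokes it via the &struct drm_driver instance registered by this
 * driver, along the lines of:
 *
 *	static const struct drm_driver pvr_drm_driver = {
 *		.gem_create_object = pvr_gem_create_object,
 *		// ...
 *	};
 *
 * so every shmem BO allocated for the device is backed by a pvr_gem_object
 * carrying &pvr_gem_object_funcs.
 */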

/**
 * pvr_gem_object_create() - Creates a PowerVR-specific buffer object.
 * @pvr_dev: Target PowerVR device.
 * @size: Size of the object to allocate in bytes. Must be greater than zero.
 * Any value which is not an exact multiple of the system page size will be
 * rounded up to satisfy this condition.
 * @flags: Options which affect both this operation and future mapping
 * operations performed on the returned object. Must be a combination of
 * DRM_PVR_BO_* and/or PVR_BO_* flags.
 *
 * The created object may be larger than @size, but can never be smaller. To
 * get the exact size, call pvr_gem_object_size() on the returned pointer.
 *
 * Return:
 *  * The newly-minted PowerVR-specific buffer object on success,
 *  * -%EINVAL if @size is zero or @flags is not valid,
 *  * -%ENOMEM if sufficient physical memory cannot be allocated, or
 *  * Any other error returned by drm_gem_create_mmap_offset().
 */
struct pvr_gem_object *
pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
{
	struct drm_gem_shmem_object *shmem_obj;
	struct pvr_gem_object *pvr_obj;
	struct sg_table *sgt;
	int err;

	/* Verify @size and @flags before continuing. */
	if (size == 0 || !pvr_gem_object_flags_validate(flags))
		return ERR_PTR(-EINVAL);

	shmem_obj = drm_gem_shmem_create(from_pvr_device(pvr_dev), size);
	if (IS_ERR(shmem_obj))
		return ERR_CAST(shmem_obj);

	shmem_obj->pages_mark_dirty_on_put = true;
	shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
	pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
	pvr_obj->flags = flags;

	sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto err_shmem_object_free;
	}

	dma_sync_sgtable_for_device(shmem_obj->base.dev->dev, sgt,
				    DMA_BIDIRECTIONAL);

	/*
	 * Do this last because pvr_gem_object_zero() requires a fully
	 * configured instance of struct pvr_gem_object. A failure here would
	 * hand out a buffer with stale page contents, so propagate it.
	 */
	err = pvr_gem_object_zero(pvr_obj);
	if (err)
		goto err_shmem_object_free;

	return pvr_obj;

err_shmem_object_free:
	drm_gem_shmem_free(shmem_obj);

	return ERR_PTR(err);
}
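
/*
 * Usage sketch (illustrative): sizes are rounded up to page granularity, so
 * callers needing the exact backing size query it after creation.
 *
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_gem_object_create(pvr_dev, 1,
 *					DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS);
 *	if (IS_ERR(pvr_obj))
 *		return PTR_ERR(pvr_obj);
 *
 *	// pvr_gem_object_size() now reports at least PAGE_SIZE.
 *	pvr_gem_object_put(pvr_obj);
 */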

/**
 * pvr_gem_get_dma_addr() - Get DMA address for given offset in object
 * @pvr_obj: Pointer to object to lookup address in.
 * @offset: Offset within object to lookup address at.
 * @dma_addr_out: Pointer to location to store DMA address.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if object is not currently backed, or if @offset is out of valid
 *    range for this object.
 */
int
pvr_gem_get_dma_addr(struct pvr_gem_object *pvr_obj, u32 offset,
		     dma_addr_t *dma_addr_out)
{
	struct drm_gem_shmem_object *shmem_obj = shmem_gem_from_pvr_gem(pvr_obj);
	u32 accumulated_offset = 0;
	struct scatterlist *sgl;
	unsigned int sgt_idx;

	WARN_ON(!shmem_obj->sgt);
	for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, sgt_idx) {
		u32 new_offset = accumulated_offset + sg_dma_len(sgl);

		if (offset >= accumulated_offset && offset < new_offset) {
			*dma_addr_out = sg_dma_address(sgl) +
					(offset - accumulated_offset);
			return 0;
		}

		accumulated_offset = new_offset;
	}

	return -EINVAL;
}
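
/*
 * Usage sketch (illustrative): walking a buffer at a fixed granularity to
 * feed device page tables. DEVICE_PAGE_SIZE is a placeholder, not a name
 * defined by this driver.
 *
 *	for (u32 offset = 0; offset < pvr_gem_object_size(pvr_obj);
 *	     offset += DEVICE_PAGE_SIZE) {
 *		dma_addr_t dma_addr;
 *
 *		if (pvr_gem_get_dma_addr(pvr_obj, offset, &dma_addr))
 *			return -EINVAL;
 *
 *		// program dma_addr into the device MMU here
 *	}
 */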