// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_debugfs.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_free_list.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_defs.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_rogue_fwif_shared.h"
#include "pvr_vm.h"

#include <uapi/drm/pvr_drm.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <linux/err.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/xarray.h>

/**
 * DOC: PowerVR (Series 6 and later) and IMG Graphics Driver
 *
 * This driver supports the following PowerVR/IMG graphics cores from Imagination Technologies:
 *
 * * AXE-1-16M (found in Texas Instruments AM62)
 */

/**
 * pvr_ioctl_create_bo() - IOCTL to create a GEM buffer object.
 * @drm_dev: [IN] Target DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 * &struct drm_pvr_ioctl_create_bo_args.
 * @file: [IN] DRM file-private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_BO.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if the value of &drm_pvr_ioctl_create_bo_args.size is zero
 *    or wider than &typedef size_t,
 *  * -%EINVAL if any bits in &drm_pvr_ioctl_create_bo_args.flags that are
 *    reserved or undefined are set,
 *  * -%EINVAL if any padding fields in &drm_pvr_ioctl_create_bo_args are not
 *    zero,
 *  * Any error encountered while creating the object (see
 *    pvr_gem_object_create()), or
 *  * Any error encountered while transferring ownership of the object into a
 *    userspace-accessible handle (see pvr_gem_object_into_handle()).
 */
static int
pvr_ioctl_create_bo(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_bo_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);

	struct pvr_gem_object *pvr_obj;
	size_t sanitized_size;

	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_c != 0) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * On 64-bit platforms (our primary target), size_t is a u64. However,
	 * on other architectures we have to check for overflow when casting
	 * down to size_t from u64.
	 *
	 * We also disallow zero-sized allocations, and reserved (kernel-only)
	 * flags.
	 */
	if (args->size > SIZE_MAX || args->size == 0 || args->flags &
	    ~DRM_PVR_BO_FLAGS_MASK || args->size & (PVR_DEVICE_PAGE_SIZE - 1)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	sanitized_size = (size_t)args->size;

	/*
	 * Create a buffer object and transfer ownership to a userspace-
	 * accessible handle.
	 */
	pvr_obj = pvr_gem_object_create(pvr_dev, sanitized_size, args->flags);
	if (IS_ERR(pvr_obj)) {
		err = PTR_ERR(pvr_obj);
		goto err_drm_dev_exit;
	}

	/* This function will not modify &args->handle unless it succeeds. */
	err = pvr_gem_object_into_handle(pvr_obj, pvr_file, &args->handle);
	if (err)
		goto err_destroy_obj;

	drm_dev_exit(idx);

	return 0;

err_destroy_obj:
	/*
	 * GEM objects are refcounted, so there is no explicit destructor
	 * function. Instead, we release the singular reference we currently
	 * hold on the object and let GEM take care of the rest.
	 */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}
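
/*
 * A minimal userspace usage sketch for %DRM_IOCTL_PVR_CREATE_BO (illustrative
 * only: error handling is elided, the 1 MiB size and the
 * DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS flag are assumptions about the
 * caller, and the size must be a multiple of the device page size):
 *
 *	struct drm_pvr_ioctl_create_bo_args bo_args = {
 *		.size = 1024 * 1024,
 *		.flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_PVR_CREATE_BO, &bo_args) == 0)
 *		use_buffer_handle(bo_args.handle);	// hypothetical helper
 *
 * On success, bo_args.handle holds the new GEM handle for this file.
 */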

/**
 * pvr_ioctl_get_bo_mmap_offset() - IOCTL to generate a "fake" offset to be
 * used when calling mmap() from userspace to map the given GEM buffer object
 * @drm_dev: [IN] DRM device (unused).
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_get_bo_mmap_offset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET.
 *
 * This IOCTL does *not* perform an mmap. See the docs on
 * &struct drm_pvr_ioctl_get_bo_mmap_offset_args for details.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOENT if the handle does not reference a valid GEM buffer object,
 *  * -%EINVAL if any padding fields in &struct
 *    drm_pvr_ioctl_get_bo_mmap_offset_args are not zero, or
 *  * Any error returned by drm_gem_create_mmap_offset().
 */
static int
pvr_ioctl_get_bo_mmap_offset(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_gem_object *pvr_obj;
	struct drm_gem_object *gem_obj;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* All padding fields must be zeroed. */
	if (args->_padding_4 != 0) {
		ret = -EINVAL;
		goto err_drm_dev_exit;
	}

	/*
	 * Obtain a kernel reference to the buffer object. This reference is
	 * counted and must be manually dropped before returning. If a buffer
	 * object cannot be found for the specified handle, return -%ENOENT (No
	 * such file or directory).
	 */
	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		ret = -ENOENT;
		goto err_drm_dev_exit;
	}

	gem_obj = gem_from_pvr_gem(pvr_obj);

	/*
	 * Allocate a fake offset which can be used in userspace calls to mmap
	 * on the DRM device file. If this fails, return the error code. This
	 * operation is idempotent.
	 */
	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret != 0) {
		/* Drop our reference to the buffer object. */
		drm_gem_object_put(gem_obj);
		goto err_drm_dev_exit;
	}

	/*
	 * Read out the fake offset allocated by the earlier call to
	 * drm_gem_create_mmap_offset.
	 */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	/* Drop our reference to the buffer object. */
	pvr_gem_object_put(pvr_obj);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return ret;
}
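
/*
 * A sketch of the intended userspace sequence (illustrative only; error
 * handling elided, and bo_handle/bo_size are assumed to come from an earlier
 * %DRM_IOCTL_PVR_CREATE_BO call):
 *
 *	struct drm_pvr_ioctl_get_bo_mmap_offset_args offset_args = {
 *		.handle = bo_handle,
 *	};
 *	void *map;
 *
 *	ioctl(drm_fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &offset_args);
 *	map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, offset_args.offset);
 *
 * The returned offset is only meaningful as an mmap() offset on the same DRM
 * device file; it is not a GPU or CPU address.
 */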

static __always_inline __maybe_unused u64
pvr_fw_version_packed(u32 major, u32 minor)
{
	return ((u64)major << 32) | minor;
}
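
/*
 * For example, pvr_fw_version_packed(1, 17) yields 0x0000000100000011: the
 * major version occupies the high 32 bits and the minor version the low 32
 * bits, so packed versions compare correctly with plain integer comparisons
 * (any 1.x sorts below any 2.x).
 */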

static u32
rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev)
{
	u32 max_partitions = 0;
	u32 tile_size_x = 0;
	u32 tile_size_y = 0;

	PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x);
	PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y);
	PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions);

	if (tile_size_x == 16 && tile_size_y == 16) {
		u32 usc_min_output_registers_per_pix = 0;

		PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix,
				  &usc_min_output_registers_per_pix);

		return tile_size_x * tile_size_y * max_partitions *
		       usc_min_output_registers_per_pix;
	}

	return max_partitions * 1024;
}
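
/*
 * Worked example with hypothetical feature values: on a core with 16x16
 * tiles, max_partitions = 4 and usc_min_output_registers_per_pix = 2, this
 * evaluates to 16 * 16 * 4 * 2 = 2048 dwords. Cores with any other tile size
 * take the fallback max_partitions * 1024 path instead.
 */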

static u32
rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev)
{
	u32 common_store_size_in_dwords = 512 * 4 * 4;
	u32 alloc_region_size;

	PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords);

	alloc_region_size = common_store_size_in_dwords - (256U * 4U) -
			    rogue_get_common_store_partition_space_size(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 44079)) {
		u32 common_store_split_point = (768U * 4U * 4U);

		return min(common_store_split_point - (256U * 4U), alloc_region_size);
	}

	return alloc_region_size;
}
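
/*
 * Continuing the hypothetical example above: with the default
 * common_store_size_in_dwords of 512 * 4 * 4 = 8192 dwords, a reserved area
 * of 256 * 4 = 1024 dwords and a partition space of 2048 dwords, the
 * allocation region is 8192 - 1024 - 2048 = 5120 dwords (before the BRN44079
 * clamp, which only applies on affected cores).
 */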

static inline u32
rogue_get_num_phantoms(struct pvr_device *pvr_dev)
{
	u32 num_clusters = 1;

	PVR_FEATURE_VALUE(pvr_dev, num_clusters, &num_clusters);

	return ROGUE_REQ_NUM_PHANTOMS(num_clusters);
}

static inline u32
rogue_get_max_coeffs(struct pvr_device *pvr_dev)
{
	u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS;
	u32 pending_allocation_shared_regs = 2U * 1024U;
	u32 pending_allocation_coeff_regs = 0U;
	u32 num_phantoms = rogue_get_num_phantoms(pvr_dev);
	u32 tiles_in_flight = 0;
	u32 max_coeff_pixel_portion;

	PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight);
	max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms);
	max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS;

	/*
	 * Compute tasks on cores with BRN48492 and without compute overlap may lock
	 * up without two additional lines of coeffs.
	 */
	if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap))
		pending_allocation_coeff_regs = 2U * 1024U;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748))
		pending_allocation_shared_regs = 0;

	if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020))
		max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS;

	return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs -
		(max_coeff_pixel_portion + max_coeff_additional_portion +
		 pending_allocation_shared_regs);
}

static inline u32
rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev)
{
	u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev);

	if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) &&
	    !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) {
		/* Driver must not use the 2 reserved lines. */
		available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2;
	}

	/*
	 * The maximum amount of local memory available to a kernel is the minimum
	 * of the total number of coefficient registers available and the max common
	 * store allocation size which can be made by the CDM.
	 *
	 * If any coeff lines are reserved for tessellation or pixel then we need to
	 * subtract those too.
	 */
	return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS);
}

/**
 * pvr_dev_query_gpu_info_get() - Handle a %DRM_PVR_DEV_QUERY_GPU_INFO_GET query
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_gpu_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev,
			   struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_gpu_info gpu_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_gpu_info);
		return 0;
	}

	gpu_info.gpu_id =
		pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id);
	gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(gpu_info))
		args->size = sizeof(gpu_info);
	return 0;
}
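
/*
 * The dev-query helpers above and below all follow the same two-call
 * pattern. A hedged userspace sketch (error handling elided; the
 * (__u64)(uintptr_t) cast is how a user pointer is packed into the u64
 * field):
 *
 *	struct drm_pvr_dev_query_gpu_info gpu_info;
 *	struct drm_pvr_ioctl_dev_query_args query = {
 *		.type = DRM_PVR_DEV_QUERY_GPU_INFO_GET,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// fetches query.size
 *
 *	query.pointer = (__u64)(uintptr_t)&gpu_info;
 *	ioctl(drm_fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// copies the data out
 *
 * The size negotiation lets old userspace keep working against newer, larger
 * kernel structs, and vice versa.
 */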

/**
 * pvr_dev_query_runtime_info_get() - Handle a %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET query
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_runtime_info.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 *
 * Returns:
 *  * 0 on success, or if size is requested using a NULL pointer, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_runtime_info runtime_info = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_runtime_info);
		return 0;
	}

	runtime_info.free_list_min_pages =
		pvr_get_free_list_min_pages(pvr_dev);
	runtime_info.free_list_max_pages =
		ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE;
	runtime_info.common_store_alloc_region_size =
		rogue_get_common_store_alloc_region_size(pvr_dev);
	runtime_info.common_store_partition_space_size =
		rogue_get_common_store_partition_space_size(pvr_dev);
	runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev);
	runtime_info.cdm_max_local_mem_size_regs =
		rogue_get_cdm_max_local_mem_size_regs(pvr_dev);

	err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info);
	if (err < 0)
		return err;

	if (args->size > sizeof(runtime_info))
		args->size = sizeof(runtime_info);
	return 0;
}

/**
 * pvr_dev_query_quirks_get() - Unpack the array of quirks at the address
 * given in a struct drm_pvr_dev_query_quirks, or get the amount of space
 * required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_quirks.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_quirks_get(struct pvr_device *pvr_dev,
			 struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	static const u32 umd_quirks_musthave[] = {
		47217,
		49927,
		62269,
	};
	static const u32 umd_quirks[] = {
		48545,
		51764,
	};
	struct drm_pvr_dev_query_quirks query;
	u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)];
	size_t out_musthave_count = 0;
	size_t out_count = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_quirks);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_quirks_musthave); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) {
			out[out_count++] = umd_quirks_musthave[i];
			out_musthave_count++;
		}
	}

	for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) {
		if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i]))
			out[out_count++] = umd_quirks[i];
	}

	if (!query.quirks)
		goto copy_out;
	if (query.count < out_count)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.quirks), out,
			 out_count * sizeof(u32))) {
		return -EFAULT;
	}

	query.musthave_count = out_musthave_count;

copy_out:
	query.count = out_count;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
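
/*
 * Quirks (and enhancements, below) add a third step to the dev-query
 * pattern: query the counts first, then allocate and fetch the IDs. A hedged
 * sketch (error handling elided):
 *
 *	struct drm_pvr_dev_query_quirks quirks_query = {0};
 *	struct drm_pvr_ioctl_dev_query_args query = {
 *		.type = DRM_PVR_DEV_QUERY_QUIRKS_GET,
 *		.size = sizeof(quirks_query),
 *		.pointer = (__u64)(uintptr_t)&quirks_query,
 *	};
 *	__u32 *ids;
 *
 *	ioctl(drm_fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// .quirks is NULL:
 *							// counts only
 *	ids = malloc(quirks_query.count * sizeof(*ids));
 *	quirks_query.quirks = (__u64)(uintptr_t)ids;
 *	ioctl(drm_fd, DRM_IOCTL_PVR_DEV_QUERY, &query);	// copies the IDs
 *
 * The first quirks_query.musthave_count entries of the array are the quirks
 * userspace is required to support before using the device.
 */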

/**
 * pvr_dev_query_enhancements_get() - Unpack the array of enhancements at the
 * address given in a struct drm_pvr_dev_query_enhancements, or get the amount
 * of space required for it.
 * @pvr_dev: Device pointer.
 * @args: [IN] Device query arguments containing a pointer to a userspace
 *        struct drm_pvr_dev_query_enhancements.
 *
 * If the query object pointer is NULL, the size field is updated with the
 * expected size of the query object.
 * If the userspace pointer in the query object is NULL, or the count is
 * short, no data is copied.
 * The count field will be updated to that copied, or if either pointer is
 * NULL, that which would have been copied.
 * The size field in the query object will be updated to the size copied.
 *
 * Returns:
 *  * 0 on success, or if size/count is requested using a NULL pointer, or
 *  * -%EINVAL if args contained non-zero reserved fields, or
 *  * -%E2BIG if the indicated length of the allocation is less than is
 *    required to contain the copied data, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev,
			       struct drm_pvr_ioctl_dev_query_args *args)
{
	/*
	 * @FIXME - hardcoding of numbers here is intended as an
	 * intermediate step so the UAPI can be fixed, but requires a
	 * refactor in the future to store them in a more appropriate
	 * location
	 */
	const u32 umd_enhancements[] = {
		35421,
		42064,
	};
	struct drm_pvr_dev_query_enhancements query;
	u32 out[ARRAY_SIZE(umd_enhancements)];
	size_t out_idx = 0;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_enhancements);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);

	if (err < 0)
		return err;
	if (query._padding_a)
		return -EINVAL;
	if (query._padding_c)
		return -EINVAL;

	for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) {
		if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i]))
			out[out_idx++] = umd_enhancements[i];
	}

	if (!query.enhancements)
		goto copy_out;
	if (query.count < out_idx)
		return -E2BIG;

	if (copy_to_user(u64_to_user_ptr(query.enhancements), out,
			 out_idx * sizeof(u32))) {
		return -EFAULT;
	}

copy_out:
	query.count = out_idx;
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_ioctl_dev_query() - IOCTL to copy information about a device
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_dev_query_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DEV_QUERY.
 * If the given receiving struct pointer is NULL, or the indicated size is too
 * small, the expected size of the struct type will be returned in the size
 * argument field.
 *
 * Return:
 *  * 0 on success or when fetching the size with args->pointer == NULL, or
 *  * -%E2BIG if the indicated size of the receiving struct is less than is
 *    required to contain the copied data, or
 *  * -%EINVAL if the indicated struct type is unknown, or
 *  * -%ENOMEM if local memory could not be allocated, or
 *  * -%EFAULT if local memory could not be copied to userspace.
 */
static int
pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args,
		    struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_dev_query_args *args = raw_args;
	int idx;
	int ret = -EINVAL;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	switch ((enum drm_pvr_dev_query)args->type) {
	case DRM_PVR_DEV_QUERY_GPU_INFO_GET:
		ret = pvr_dev_query_gpu_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET:
		ret = pvr_dev_query_runtime_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_QUIRKS_GET:
		ret = pvr_dev_query_quirks_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET:
		ret = pvr_dev_query_enhancements_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_HEAP_INFO_GET:
		ret = pvr_heap_info_get(pvr_dev, args);
		break;

	case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET:
		ret = pvr_static_data_areas_get(pvr_dev, args);
		break;
	}

	drm_dev_exit(idx);

	return ret;
}

/**
 * pvr_ioctl_create_context() - IOCTL to create a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if provided arguments are invalid, or
 *  * -%EFAULT if arguments can't be copied from userspace, or
 *  * Any error returned by pvr_context_create().
 */
static int
pvr_ioctl_create_context(struct drm_device *drm_dev, void *raw_args,
			 struct drm_file *file)
{
	struct drm_pvr_ioctl_create_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;
	int idx;
	int ret;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	ret = pvr_context_create(pvr_file, args);

	drm_dev_exit(idx);

	return ret;
}

/**
 * pvr_ioctl_destroy_context() - IOCTL to destroy a context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_destroy_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if context not in context list.
 */
static int
pvr_ioctl_destroy_context(struct drm_device *drm_dev, void *raw_args,
			  struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_context_args *args = raw_args;
	struct pvr_file *pvr_file = file->driver_priv;

	if (args->_padding_4)
		return -EINVAL;

	return pvr_context_destroy(pvr_file, args->handle);
}

/**
 * pvr_ioctl_create_free_list() - IOCTL to create a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_free_list_create().
 */
static int
pvr_ioctl_create_free_list(struct drm_device *drm_dev, void *raw_args,
			   struct drm_file *file)
{
	struct drm_pvr_ioctl_create_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	free_list = pvr_free_list_create(pvr_file, args);
	if (IS_ERR(free_list)) {
		err = PTR_ERR(free_list);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->free_list_handles,
		       &args->handle,
		       free_list,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_free_list_put(free_list);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_free_list() - IOCTL to destroy a free list
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_free_list_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_FREE_LIST.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if free list not in object list.
 */
static int
pvr_ioctl_destroy_free_list(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_free_list_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_free_list *free_list;

	if (args->_padding_4)
		return -EINVAL;

	free_list = xa_erase(&pvr_file->free_list_handles, args->handle);
	if (!free_list)
		return -EINVAL;

	pvr_free_list_put(free_list);
	return 0;
}

/**
 * pvr_ioctl_create_hwrt_dataset() - IOCTL to create a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_hwrt_dataset_create().
 */
static int
pvr_ioctl_create_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			      struct drm_file *file)
{
	struct drm_pvr_ioctl_create_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	hwrt = pvr_hwrt_dataset_create(pvr_file, args);
	if (IS_ERR(hwrt)) {
		err = PTR_ERR(hwrt);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->hwrt_handles,
		       &args->handle,
		       hwrt,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_hwrt_dataset_put(hwrt);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_hwrt_dataset() - IOCTL to destroy a HWRT dataset
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_hwrt_dataset_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if HWRT dataset not in object list.
 */
static int
pvr_ioctl_destroy_hwrt_dataset(struct drm_device *drm_dev, void *raw_args,
			       struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_hwrt_dataset_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_hwrt_dataset *hwrt;

	if (args->_padding_4)
		return -EINVAL;

	hwrt = xa_erase(&pvr_file->hwrt_handles, args->handle);
	if (!hwrt)
		return -EINVAL;

	pvr_hwrt_dataset_put(hwrt);
	return 0;
}

/**
 * pvr_ioctl_create_vm_context() - IOCTL to create a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN/OUT] Arguments passed to this IOCTL. This must be of type
 *                     &struct drm_pvr_ioctl_create_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_CREATE_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_vm_create_context().
 */
static int
pvr_ioctl_create_vm_context(struct drm_device *drm_dev, void *raw_args,
			    struct drm_file *file)
{
	struct drm_pvr_ioctl_create_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	if (args->_padding_4) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_create_context(pvr_file->pvr_dev, true);
	if (IS_ERR(vm_ctx)) {
		err = PTR_ERR(vm_ctx);
		goto err_drm_dev_exit;
	}

	/* Allocate object handle for userspace. */
	err = xa_alloc(&pvr_file->vm_ctx_handles,
		       &args->handle,
		       vm_ctx,
		       xa_limit_32b,
		       GFP_KERNEL);
	if (err < 0)
		goto err_cleanup;

	drm_dev_exit(idx);

	return 0;

err_cleanup:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_destroy_vm_context() - IOCTL to destroy a VM context
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_destroy_vm_context_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if object not in object list.
 */
static int
pvr_ioctl_destroy_vm_context(struct drm_device *drm_dev, void *raw_args,
			     struct drm_file *file)
{
	struct drm_pvr_ioctl_destroy_vm_context_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = xa_erase(&pvr_file->vm_ctx_handles, args->handle);
	if (!vm_ctx)
		return -EINVAL;

	pvr_vm_context_put(vm_ctx);
	return 0;
}

/**
 * pvr_ioctl_vm_map() - IOCTL to map buffer to GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_map_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_MAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_map_args.flags is not zero,
 *  * -%EINVAL if the bounds specified by &drm_pvr_ioctl_vm_map_args.offset
 *    and &drm_pvr_ioctl_vm_map_args.size are not valid or do not fall
 *    within the buffer object specified by
 *    &drm_pvr_ioctl_vm_map_args.handle,
 *  * -%EINVAL if the bounds specified by
 *    &drm_pvr_ioctl_vm_map_args.device_addr and
 *    &drm_pvr_ioctl_vm_map_args.size do not form a valid device-virtual
 *    address range which falls entirely within a single heap, or
 *  * -%ENOENT if &drm_pvr_ioctl_vm_map_args.handle does not refer to a
 *    valid PowerVR buffer object.
 */
static int
pvr_ioctl_vm_map(struct drm_device *drm_dev, void *raw_args,
		 struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct drm_pvr_ioctl_vm_map_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;

	struct pvr_gem_object *pvr_obj;
	size_t pvr_obj_size;

	u64 offset_plus_size;
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	/* Initial validation of args. */
	if (args->_padding_14) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	if (args->flags != 0 ||
	    check_add_overflow(args->offset, args->size, &offset_plus_size) ||
	    !pvr_find_heap_containing(pvr_dev, args->device_addr, args->size)) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx) {
		err = -EINVAL;
		goto err_drm_dev_exit;
	}

	pvr_obj = pvr_gem_object_from_handle(pvr_file, args->handle);
	if (!pvr_obj) {
		err = -ENOENT;
		goto err_put_vm_context;
	}

	pvr_obj_size = pvr_gem_object_size(pvr_obj);

	/*
	 * Validate offset and size args. The alignment of these will be
	 * checked when mapping; for now just check that they're within valid
	 * bounds
	 */
	if (args->offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) {
		err = -EINVAL;
		goto err_put_pvr_object;
	}

	err = pvr_vm_map(vm_ctx, pvr_obj, args->offset,
			 args->device_addr, args->size);
	if (err)
		goto err_put_pvr_object;

	/*
	 * In order to set up the mapping, we needed a reference to &pvr_obj.
	 * However, pvr_vm_map() obtains and stores its own reference, so we
	 * must release ours before returning.
	 */

err_put_pvr_object:
	pvr_gem_object_put(pvr_obj);

err_put_vm_context:
	pvr_vm_context_put(vm_ctx);

err_drm_dev_exit:
	drm_dev_exit(idx);

	return err;
}

/**
 * pvr_ioctl_vm_unmap() - IOCTL to unmap buffer from GPU address space.
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_vm_unmap_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_VM_UNMAP.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if &drm_pvr_ioctl_vm_unmap_args.device_addr is not a valid
 *    device page-aligned device-virtual address, or
 *  * -%ENOENT if there is currently no PowerVR buffer object mapped at
 *    &drm_pvr_ioctl_vm_unmap_args.device_addr.
 */
static int
pvr_ioctl_vm_unmap(struct drm_device *drm_dev, void *raw_args,
		   struct drm_file *file)
{
	struct drm_pvr_ioctl_vm_unmap_args *args = raw_args;
	struct pvr_file *pvr_file = to_pvr_file(file);
	struct pvr_vm_context *vm_ctx;
	int err;

	/* Initial validation of args. */
	if (args->_padding_4)
		return -EINVAL;

	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
	if (!vm_ctx)
		return -EINVAL;

	err = pvr_vm_unmap(vm_ctx, args->device_addr, args->size);

	pvr_vm_context_put(vm_ctx);

	return err;
}
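
/*
 * A hedged sketch of the map/unmap pairing from userspace (error handling
 * elided; vm_ctx_handle, bo_handle, dev_addr and bo_size are assumed to come
 * from earlier ioctls, with dev_addr device-page-aligned and inside a heap):
 *
 *	struct drm_pvr_ioctl_vm_map_args map_args = {
 *		.vm_context_handle = vm_ctx_handle,
 *		.device_addr = dev_addr,
 *		.handle = bo_handle,
 *		.offset = 0,
 *		.size = bo_size,
 *	};
 *	struct drm_pvr_ioctl_vm_unmap_args unmap_args = {
 *		.vm_context_handle = vm_ctx_handle,
 *		.device_addr = dev_addr,
 *		.size = bo_size,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PVR_VM_MAP, &map_args);
 *	// ... use the mapping ...
 *	ioctl(drm_fd, DRM_IOCTL_PVR_VM_UNMAP, &unmap_args);
 *
 * Because pvr_vm_map() stores its own reference to the buffer object, the
 * GEM handle may be closed while the mapping remains live.
 */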

/**
 * pvr_ioctl_submit_jobs() - IOCTL to submit jobs to the GPU
 * @drm_dev: [IN] DRM device.
 * @raw_args: [IN] Arguments passed to this IOCTL. This must be of type
 *                 &struct drm_pvr_ioctl_submit_jobs_args.
 * @file: [IN] DRM file private data.
 *
 * Called from userspace with %DRM_IOCTL_PVR_SUBMIT_JOBS.
 *
 * Return:
 *  * 0 on success, or
 *  * -%EINVAL if arguments are invalid.
 */
static int
pvr_ioctl_submit_jobs(struct drm_device *drm_dev, void *raw_args,
		      struct drm_file *file)
{
	struct drm_pvr_ioctl_submit_jobs_args *args = raw_args;
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file = to_pvr_file(file);
	int idx;
	int err;

	if (!drm_dev_enter(drm_dev, &idx))
		return -EIO;

	err = pvr_submit_jobs(pvr_dev, pvr_file, args);

	drm_dev_exit(idx);

	return err;
}

int
pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride);
}

int
pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in)
{
	if (usr_stride < min_stride)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size)))
		return -EFAULT;

	if (usr_stride > obj_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) {
		return -EFAULT;
	}

	return 0;
}
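
/*
 * A worked example of the stride scheme implemented by pvr_get_uobj() and
 * pvr_set_uobj() (sizes hypothetical): if userspace was built against a
 * 16-byte revision of an object and the kernel's current definition is 24
 * bytes, pvr_get_uobj() copies the 16 bytes supplied and zero-fills the
 * remaining 8 via copy_struct_from_user(), while pvr_set_uobj() writes back
 * only the 16 bytes userspace has room for. In the opposite case (userspace
 * stride larger than the kernel object), copy_struct_from_user() insists the
 * extra user bytes are zero, and pvr_set_uobj() zeroes the user-side tail
 * with clear_user(). Either side can therefore grow its struct revision
 * independently.
 */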

int
pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out)
{
	int ret = 0;
	void *out_alloc;

	if (in->stride < min_stride)
		return -EINVAL;

	if (!in->count)
		return 0;

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return -ENOMEM;

	if (obj_size == in->stride) {
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ret;
	}

	*out = out_alloc;
	return 0;
}

int
pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size,
		   const void *in)
{
	if (out->stride < min_stride)
		return -EINVAL;

	if (!out->count)
		return 0;

	if (obj_size == out->stride) {
		if (copy_to_user(u64_to_user_ptr(out->array), in,
				 (unsigned long)obj_size * out->count))
			return -EFAULT;
	} else {
		u32 cpy_elem_size = min_t(u32, out->stride, obj_size);
		void __user *out_ptr = u64_to_user_ptr(out->array);
		const void *in_ptr = in;

		for (u32 i = 0; i < out->count; i++) {
			if (copy_to_user(out_ptr, in_ptr, cpy_elem_size))
				return -EFAULT;

			/*
			 * The destination is the userspace array, so advance
			 * it by the userspace stride; the source is the
			 * tightly packed kernel array of obj_size elements.
			 */
			out_ptr += out->stride;
			in_ptr += obj_size;
		}

		if (out->stride > obj_size &&
		    clear_user(u64_to_user_ptr(out->array + obj_size),
			       out->stride - obj_size)) {
			return -EFAULT;
		}
	}

	return 0;
}

#define DRM_PVR_IOCTL(_name, _func, _flags) \
	DRM_IOCTL_DEF_DRV(PVR_##_name, pvr_ioctl_##_func, _flags)

/* clang-format off */

static const struct drm_ioctl_desc pvr_drm_driver_ioctls[] = {
	DRM_PVR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(GET_BO_MMAP_OFFSET, get_bo_mmap_offset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_VM_CONTEXT, create_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_VM_CONTEXT, destroy_vm_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_MAP, vm_map, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(VM_UNMAP, vm_unmap, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_CONTEXT, create_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_CONTEXT, destroy_context, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_FREE_LIST, create_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_FREE_LIST, destroy_free_list, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(CREATE_HWRT_DATASET, create_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(DESTROY_HWRT_DATASET, destroy_hwrt_dataset, DRM_RENDER_ALLOW),
	DRM_PVR_IOCTL(SUBMIT_JOBS, submit_jobs, DRM_RENDER_ALLOW),
};

/* clang-format on */

#undef DRM_PVR_IOCTL

/**
 * pvr_drm_driver_open() - Driver callback when a new &struct drm_file is opened
 * @drm_dev: [IN] DRM device.
 * @file: [IN] DRM file private data.
 *
 * Allocates powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 *
 * Return:
 *  * 0 on success,
 *  * -%ENOMEM if the allocation of a &struct pvr_file fails, or
 *  * Any error returned by pvr_memory_context_init().
 */
static int
pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file)
{
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);
	struct pvr_file *pvr_file;

	pvr_file = kzalloc(sizeof(*pvr_file), GFP_KERNEL);
	if (!pvr_file)
		return -ENOMEM;

	/*
	 * Store reference to base DRM file private data for use by
	 * from_pvr_file.
	 */
	pvr_file->file = file;

	/*
	 * Store reference to powervr-specific outer device struct in file
	 * private data for convenient access.
	 */
	pvr_file->pvr_dev = pvr_dev;

	INIT_LIST_HEAD(&pvr_file->contexts);

	xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_file->vm_ctx_handles, XA_FLAGS_ALLOC1);

	/*
	 * Store reference to powervr-specific file private data in DRM file
	 * private data.
	 */
	file->driver_priv = pvr_file;

	return 0;
}

/**
 * pvr_drm_driver_postclose() - One of the driver callbacks when a &struct
 * drm_file is closed.
 * @drm_dev: [IN] DRM device (unused).
 * @file: [IN] DRM file private data.
 *
 * Frees powervr-specific file private data (&struct pvr_file).
 *
 * Registered in &pvr_drm_driver.
 */
static void
pvr_drm_driver_postclose(__always_unused struct drm_device *drm_dev,
			 struct drm_file *file)
{
	struct pvr_file *pvr_file = to_pvr_file(file);

	/* Kill remaining contexts. */
	pvr_destroy_contexts_for_file(pvr_file);

	/* Drop references on any remaining objects. */
	pvr_destroy_free_lists_for_file(pvr_file);
	pvr_destroy_hwrt_datasets_for_file(pvr_file);
	pvr_destroy_vm_contexts_for_file(pvr_file);

	kfree(pvr_file);
	file->driver_priv = NULL;
}

DEFINE_DRM_GEM_FOPS(pvr_drm_driver_fops);

static struct drm_driver pvr_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_GEM_GPUVA | DRIVER_RENDER |
			   DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
	.open = pvr_drm_driver_open,
	.postclose = pvr_drm_driver_postclose,
	.ioctls = pvr_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(pvr_drm_driver_ioctls),
	.fops = &pvr_drm_driver_fops,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = pvr_debugfs_init,
#endif

	.name = PVR_DRIVER_NAME,
	.desc = PVR_DRIVER_DESC,
	.date = PVR_DRIVER_DATE,
	.major = PVR_DRIVER_MAJOR,
	.minor = PVR_DRIVER_MINOR,
	.patchlevel = PVR_DRIVER_PATCHLEVEL,

	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_create_object = pvr_gem_create_object,
};

static int
pvr_probe(struct platform_device *plat_dev)
{
	struct pvr_device *pvr_dev;
	struct drm_device *drm_dev;
	int err;

	pvr_dev = devm_drm_dev_alloc(&plat_dev->dev, &pvr_drm_driver,
				     struct pvr_device, base);
	if (IS_ERR(pvr_dev))
		return PTR_ERR(pvr_dev);

	drm_dev = &pvr_dev->base;

	platform_set_drvdata(plat_dev, drm_dev);

	init_rwsem(&pvr_dev->reset_sem);

	pvr_context_device_init(pvr_dev);

	err = pvr_queue_device_init(pvr_dev);
	if (err)
		goto err_context_fini;

	devm_pm_runtime_enable(&plat_dev->dev);
	pm_runtime_mark_last_busy(&plat_dev->dev);

	pm_runtime_set_autosuspend_delay(&plat_dev->dev, 50);
	pm_runtime_use_autosuspend(&plat_dev->dev);
	pvr_watchdog_init(pvr_dev);

	err = pvr_device_init(pvr_dev);
	if (err)
		goto err_watchdog_fini;

	err = drm_dev_register(drm_dev, 0);
	if (err)
		goto err_device_fini;

	xa_init_flags(&pvr_dev->free_list_ids, XA_FLAGS_ALLOC1);
	xa_init_flags(&pvr_dev->job_ids, XA_FLAGS_ALLOC1);

	return 0;

err_device_fini:
	pvr_device_fini(pvr_dev);

err_watchdog_fini:
	pvr_watchdog_fini(pvr_dev);

	pvr_queue_device_fini(pvr_dev);

err_context_fini:
	pvr_context_device_fini(pvr_dev);

	return err;
}

static void pvr_remove(struct platform_device *plat_dev)
{
	struct drm_device *drm_dev = platform_get_drvdata(plat_dev);
	struct pvr_device *pvr_dev = to_pvr_device(drm_dev);

	WARN_ON(!xa_empty(&pvr_dev->job_ids));
	WARN_ON(!xa_empty(&pvr_dev->free_list_ids));

	xa_destroy(&pvr_dev->job_ids);
	xa_destroy(&pvr_dev->free_list_ids);

	pm_runtime_suspend(drm_dev->dev);
	pvr_device_fini(pvr_dev);
	drm_dev_unplug(drm_dev);
	pvr_watchdog_fini(pvr_dev);
	pvr_queue_device_fini(pvr_dev);
	pvr_context_device_fini(pvr_dev);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "img,img-axe", .data = NULL },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static const struct dev_pm_ops pvr_pm_ops = {
	RUNTIME_PM_OPS(pvr_power_device_suspend, pvr_power_device_resume, pvr_power_device_idle)
};

static struct platform_driver pvr_driver = {
	.probe = pvr_probe,
	.remove = pvr_remove,
	.driver = {
		.name = PVR_DRIVER_NAME,
		.pm = &pvr_pm_ops,
		.of_match_table = dt_match,
	},
};
module_platform_driver(pvr_driver);

MODULE_AUTHOR("Imagination Technologies Ltd.");
MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");