/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_QUEUE_H
#define PVR_QUEUE_H

#include <drm/gpu_scheduler.h>
#include <linux/workqueue.h>

#include "pvr_cccb.h"
#include "pvr_device.h"

struct pvr_context;
struct pvr_queue;

/**
 * struct pvr_queue_fence_ctx - Queue fence context
 *
 * Used to implement dma_fence_ops for pvr_job::{done,cccb}_fence.
 */
struct pvr_queue_fence_ctx {
	/** @id: Fence context ID allocated with dma_fence_context_alloc(). */
	u64 id;

	/** @seqno: Sequence number incremented each time a fence is created. */
	atomic_t seqno;

	/** @lock: Lock used to synchronize access to fences allocated by this context. */
	spinlock_t lock;
};
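
/*
 * Illustrative sketch (not part of this header): a fence is typically
 * minted from this context by combining @id, @lock and an incremented
 * @seqno in a dma_fence_init() call. The ops structure name below is a
 * placeholder, not an identifier defined by this driver:
 *
 *	static void sketch_fence_init(struct pvr_queue_fence *f,
 *				      struct pvr_queue *queue,
 *				      struct pvr_queue_fence_ctx *ctx)
 *	{
 *		f->queue = queue;
 *		dma_fence_init(&f->base, &some_queue_fence_ops, &ctx->lock,
 *			       ctx->id, atomic_inc_return(&ctx->seqno));
 *	}
 */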

/**
 * struct pvr_queue_cccb_fence_ctx - CCCB fence context
 *
 * Context used to manage fences controlling access to the CCCB. No fences are
 * issued if there's enough space in the CCCB to push job commands.
 */
struct pvr_queue_cccb_fence_ctx {
	/** @base: Base queue fence context. */
	struct pvr_queue_fence_ctx base;

	/**
	 * @job: Job waiting for CCCB space.
	 *
	 * Thanks to the serialization done at the drm_sched_entity level,
	 * there's no more than one job waiting for CCCB space at a given time.
	 *
	 * This field is NULL if no jobs are currently waiting for CCCB space.
	 *
	 * Must be accessed with @job_lock held.
	 */
	struct pvr_job *job;

	/** @job_lock: Lock protecting access to the job object. */
	struct mutex job_lock;
};
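
/*
 * Illustrative sketch (not part of this header): when CCCB space is
 * reclaimed, the single waiting job's fence can be signaled under
 * @job_lock, roughly as below. cccb_has_room_for() is a hypothetical
 * helper used only for this example:
 *
 *	mutex_lock(&ctx->job_lock);
 *	if (ctx->job && cccb_has_room_for(ctx->job)) {
 *		dma_fence_signal(ctx->job->cccb_fence);
 *		ctx->job = NULL;
 *	}
 *	mutex_unlock(&ctx->job_lock);
 */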

/**
 * struct pvr_queue_fence - Queue fence object
 */
struct pvr_queue_fence {
	/** @base: Base dma_fence. */
	struct dma_fence base;

	/** @queue: Queue that created this fence. */
	struct pvr_queue *queue;

	/** @release_work: Fence release work structure. */
	struct work_struct release_work;
};
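
/*
 * Illustrative sketch (not part of this header): a fence's final put can
 * happen from a context where the actual teardown is inconvenient, so a
 * dma_fence_ops::release handler may defer the real cleanup to
 * @release_work. A minimal sketch, with system_wq standing in for
 * whatever workqueue the driver actually uses:
 *
 *	static void sketch_fence_release(struct dma_fence *f)
 *	{
 *		struct pvr_queue_fence *fence =
 *			container_of(f, struct pvr_queue_fence, base);
 *
 *		queue_work(system_wq, &fence->release_work);
 *	}
 */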

/**
 * struct pvr_queue - Job queue
 *
 * Used to queue and track execution of pvr_job objects.
 */
struct pvr_queue {
	/** @scheduler: Single-entity scheduler used to push jobs to this queue. */
	struct drm_gpu_scheduler scheduler;

	/** @entity: Scheduling entity backing this queue. */
	struct drm_sched_entity entity;

	/** @type: Type of jobs queued to this queue. */
	enum drm_pvr_job_type type;

	/** @ctx: Context object this queue is bound to. */
	struct pvr_context *ctx;

	/** @node: Used to add the queue to the active/idle queue list. */
	struct list_head node;

	/**
	 * @in_flight_job_count: Number of jobs submitted to the CCCB that
	 * have not been processed yet.
	 */
	atomic_t in_flight_job_count;

	/**
	 * @cccb_fence_ctx: CCCB fence context.
	 *
	 * Used to gate access to the CCCB when it is full, so we don't end
	 * up trying to push commands to the CCCB if there's not enough
	 * space to receive all the commands needed for a job to complete.
	 */
	struct pvr_queue_cccb_fence_ctx cccb_fence_ctx;

	/** @job_fence_ctx: Job fence context object. */
	struct pvr_queue_fence_ctx job_fence_ctx;

	/** @timeline_ufo: Timeline UFO for the context queue. */
	struct {
		/** @fw_obj: FW object representing the UFO value. */
		struct pvr_fw_object *fw_obj;

		/** @value: CPU mapping of the UFO value. */
		u32 *value;
	} timeline_ufo;

	/**
	 * @last_queued_job_scheduled_fence: The scheduled fence of the last
	 * job queued to this queue.
	 *
	 * We use it to insert frag -> geom dependencies when issuing combined
	 * geom+frag jobs, to guarantee that the fragment job that's part of
	 * the combined operation comes after all fragment jobs that were
	 * queued before it.
	 */
	struct dma_fence *last_queued_job_scheduled_fence;

	/** @cccb: Client Circular Command Buffer. */
	struct pvr_cccb cccb;

	/** @reg_state_obj: FW object representing the register state of this queue. */
	struct pvr_fw_object *reg_state_obj;

	/** @ctx_offset: Offset of the queue context in the FW context object. */
	u32 ctx_offset;

	/** @callstack_addr: Initial call stack address for register state object. */
	u64 callstack_addr;
};
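
/*
 * Illustrative sketch (not part of this header): a UFO-backed job fence
 * can be treated as signaled once the CPU-visible @timeline_ufo value has
 * caught up with the fence seqno. A rough check along these lines could
 * test it (seqno wrap-around handling omitted for brevity):
 *
 *	bool done = READ_ONCE(*queue->timeline_ufo.value) >=
 *		    (u32)fence->base.seqno;
 */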

bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);

int pvr_queue_job_init(struct pvr_job *job);

void pvr_queue_job_cleanup(struct pvr_job *job);

void pvr_queue_job_push(struct pvr_job *job);

struct dma_fence *pvr_queue_job_arm(struct pvr_job *job);

struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
				   enum drm_pvr_job_type type,
				   struct drm_pvr_ioctl_create_context_args *args,
				   void *fw_ctx_map);
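
/*
 * Illustrative sketch (not part of this header): based on the prototypes
 * above, a submission goes roughly create -> init -> arm -> push. Error
 * handling and job setup are omitted; only identifiers declared in this
 * header or in the drm_pvr uAPI are used:
 *
 *	queue = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY,
 *				 args, fw_ctx_map);
 *	...
 *	err = pvr_queue_job_init(job);
 *	fence = pvr_queue_job_arm(job);
 *	pvr_queue_job_push(job);
 */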

void pvr_queue_kill(struct pvr_queue *queue);

void pvr_queue_destroy(struct pvr_queue *queue);

void pvr_queue_process(struct pvr_queue *queue);

void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev);

void pvr_queue_device_post_reset(struct pvr_device *pvr_dev);

int pvr_queue_device_init(struct pvr_device *pvr_dev);

void pvr_queue_device_fini(struct pvr_device *pvr_dev);

#endif /* PVR_QUEUE_H */