/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_

#include <linux/list.h>

#include "gt/intel_engine_types.h"
#include "i915_priolist_types.h"

struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;

struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};
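
/*
 * Illustrative sketch, not part of the upstream header: how a backend might
 * order two sets of scheduling attributes. As described above, larger
 * @priority values run first. The function name is hypothetical.
 */
static inline bool
example_sched_attr_before(const struct i915_sched_attr *a,
			  const struct i915_sched_attr *b)
{
	/* true if requests carrying @a should run before those with @b */
	return a->priority > b->priority;
}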

/*
 * "People assume that time is a strict progression of cause to effect, but
 * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
 * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
 *
 * Requests exist in a complex web of interdependencies. Each request
 * has to wait for some other request to complete before it is ready to be run
 * (e.g. we have to wait until the pixels have been rendered into a texture
 * before we can copy from it). We track the readiness of a request in terms
 * of fences, but we also need to keep the dependency tree for the lifetime
 * of the request (beyond the life of an individual fence). We use the tree
 * at various points to reorder the requests whilst keeping the requests
 * in order with respect to their various dependencies.
 *
 * There is no active component to the "scheduler". As we know the dependency
 * DAG of each request, we are able to insert it into a sorted queue when it
 * is ready, and are able to reorder its portion of the graph to accommodate
 * dynamic priority changes.
 *
 * Ok, there is now one active element to the "scheduler" in the backends.
 * We let a new context run for a small amount of time before re-evaluating
 * the run order. As we re-evaluate, we maintain the strict ordering of
 * dependencies, but attempt to rotate the active contexts (the current
 * context is put to the back of its priority queue, then its dependents are
 * reshuffled). This provides minimal timeslicing and prevents a userspace hog
 * (e.g. something waiting on a user semaphore [VkEvent]) from denying service
 * to others; a sketch of the rotation follows below.
 */
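
/*
 * Illustrative sketch (assumed, not from this file) of the rotation described
 * above: on timeslice expiry, the running request is moved to the tail of its
 * priority list so equal-priority contexts take turns. Locking and the
 * reshuffling of dependents are elided; struct i915_priolist comes from
 * i915_priolist_types.h. The function name is hypothetical.
 */
static inline void
example_timeslice_rotate(struct i915_priolist *pl, struct list_head *rq_link)
{
	/* back of the queue within the same priority band */
	list_move_tail(rq_link, &pl->requests);
}
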
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link;
	struct i915_sched_attr attr;
	unsigned int flags;
#define I915_SCHED_HAS_EXTERNAL_CHAIN	BIT(0)
	intel_engine_mask_t semaphores;
};
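
/*
 * Minimal sketch of how a node's fields might be initialised before any
 * dependency edges are added. The real setup lives in i915_sched_node_init()
 * in i915_scheduler.c; this hypothetical helper only illustrates the layout.
 */
static inline void example_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = 0; /* placeholder; the driver picks its own default */
	node->flags = 0;
	node->semaphores = 0;
}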

struct i915_dependency {
	struct i915_sched_node *signaler;
	struct i915_sched_node *waiter;
	struct list_head signal_link;
	struct list_head wait_link;
	struct list_head dfs_link;
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC		BIT(0)
#define I915_DEPENDENCY_EXTERNAL	BIT(1)
#define I915_DEPENDENCY_WEAK		BIT(2)
};
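
/*
 * Sketch of how a dependency edge ties two nodes together, simplified from
 * __i915_sched_node_add_dependency() in i915_scheduler.c: one edge sits on
 * both nodes' lists via its two link members. Locking/RCU is elided and the
 * function name is hypothetical.
 */
static inline void
example_link_dependency(struct i915_dependency *dep,
			struct i915_sched_node *signaler,
			struct i915_sched_node *waiter)
{
	dep->signaler = signaler;
	dep->waiter = waiter;
	dep->flags = 0;

	/* the edge appears on the waiter's list of signalers ... */
	list_add(&dep->signal_link, &waiter->signalers_list);
	/* ... and on the signaler's list of waiters */
	list_add(&dep->wait_link, &signaler->waiters_list);
}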

#define for_each_waiter(p__, rq__) \
	list_for_each_entry_lockless(p__, \
				     &(rq__)->sched.waiters_list, \
				     wait_link)

#define for_each_signaler(p__, rq__) \
	list_for_each_entry_rcu(p__, \
				&(rq__)->sched.signalers_list, \
				signal_link)
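
/*
 * Usage sketch for the lists the iterators above walk. for_each_signaler()
 * needs the full i915_request definition (only forward-declared here), so
 * this hypothetical example walks a node directly with the plain (non-RCU)
 * list primitive, e.g. to count the signalers a request still depends upon.
 */
static inline int example_count_signalers(const struct i915_sched_node *node)
{
	const struct i915_dependency *dep;
	int count = 0;

	list_for_each_entry(dep, &node->signalers_list, signal_link)
		count++;

	return count;
}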

/**
 * struct i915_sched_engine - scheduler engine
 *
 * A scheduler engine represents a submission queue with different priority
 * bands. It contains all the common state (relative to the backend) needed
 * to queue, track, and submit a request.
 *
 * At the moment this object is quite i915-specific, but it will transition
 * into a container for the drm_gpu_scheduler plus a few other variables once
 * i915 is integrated with the DRM scheduler.
 */
struct i915_sched_engine {
	/**
	 * @ref: reference count of the scheduler engine object
	 */
	struct kref ref;

	/**
	 * @lock: protects requests in the priority lists, the @requests and
	 * @hold lists, and the @tasklet while it is running
	 */
	spinlock_t lock;

	/**
	 * @requests: list of requests in flight on this scheduler engine
	 */
	struct list_head requests;

	/**
	 * @hold: list of requests that are ready but on hold
	 */
	struct list_head hold;

	/**
	 * @tasklet: softirq tasklet for submission
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, we may have recorded the priority of an inflight request
	 * that we wanted to preempt but has since completed, so at the time
	 * of dequeuing the priority hint may no longer match the highest
	 * available request priority.
	 */
	int queue_priority_hint;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @private_data: private data of the submission backend
	 */
	void *private_data;

	/**
	 * @destroy: destroy the scheduler engine and clean up the backend
	 */
	void	(*destroy)(struct kref *kref);

	/**
	 * @disabled: check whether the backend has disabled submission
	 */
	bool	(*disabled)(struct i915_sched_engine *sched_engine);

	/**
	 * @kick_backend: kick backend after a request's priority has changed
	 */
	void	(*kick_backend)(const struct i915_request *rq,
				int prio);

	/**
	 * @bump_inflight_request_prio: update priority of an inflight request
	 */
	void	(*bump_inflight_request_prio)(struct i915_request *rq,
					      int prio);

	/**
	 * @retire_inflight_request_prio: notify priority tracking that a
	 * request has been retired
	 */
	void	(*retire_inflight_request_prio)(struct i915_request *rq);

	/**
	 * @schedule: adjust the priority of a request
	 *
	 * Call when the priority of a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void	(*schedule)(struct i915_request *request,
			    const struct i915_sched_attr *attr);
};
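
/*
 * Two illustrative helpers, both hypothetical. example_need_preempt() shows
 * the intended use of @queue_priority_hint as a cheap pre-lock test (compare
 * need_preempt() in the execlists backend); example_put() mirrors
 * i915_sched_engine_put() from i915_scheduler.h and assumes <linux/kref.h>
 * has been pulled in by the includes above.
 */
static inline bool
example_need_preempt(const struct i915_sched_engine *se, int active_prio)
{
	/*
	 * The hint may be stale (the request it tracked may have completed),
	 * so treat it as a hint only: a cheap reject before taking @lock and
	 * inspecting the queue itself.
	 */
	return se->queue_priority_hint > active_prio;
}

static inline void example_put(struct i915_sched_engine *se)
{
	/* drop a reference; @destroy runs when the count hits zero */
	kref_put(&se->ref, se->destroy);
}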

#endif /* _I915_SCHEDULER_TYPES_H_ */