// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string.h>

#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_display_types.h"
#include "intel_global_state.h"

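/*
 * A reference counted completion, used to track when the hardware
 * reprogramming of a commit that serialized some global state has
 * finished. Later commits depending on that state wait on it.
 */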
struct intel_global_commit {
	struct kref ref;
	struct completion done;
};

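/* Commit trackers are reference counted; get/put tolerate NULL. */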
static struct intel_global_commit *commit_new(void)
{
	struct intel_global_commit *commit;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return NULL;

	init_completion(&commit->done);
	kref_init(&commit->ref);

	return commit;
}

static void __commit_free(struct kref *kref)
{
	struct intel_global_commit *commit =
		container_of(kref, typeof(*commit), ref);

	kfree(commit);
}

static struct intel_global_commit *commit_get(struct intel_global_commit *commit)
{
	if (commit)
		kref_get(&commit->ref);

	return commit;
}

static void commit_put(struct intel_global_commit *commit)
{
	if (commit)
		kref_put(&commit->ref, __commit_free);
}

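/*
 * Final kref release for a global state: drop the reference on any
 * associated commit tracker and hand the state back to its owner for
 * destruction.
 */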
static void __intel_atomic_global_state_free(struct kref *kref)
{
	struct intel_global_state *obj_state =
		container_of(kref, struct intel_global_state, ref);
	struct intel_global_obj *obj = obj_state->obj;

	commit_put(obj_state->commit);

	obj->funcs->atomic_destroy_state(obj, obj_state);
}

static void intel_atomic_global_state_put(struct intel_global_state *obj_state)
{
	kref_put(&obj_state->ref, __intel_atomic_global_state_free);
}

static struct intel_global_state *
intel_atomic_global_state_get(struct intel_global_state *obj_state)
{
	kref_get(&obj_state->ref);

	return obj_state;
}

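/*
 * Register a global object with the device, handing over its initial
 * state (whose sole reference the object then holds) and the vfuncs
 * used to duplicate and destroy further states.
 *
 * A minimal sketch of a hypothetical user (all names below are
 * illustrative, not taken from this file):
 *
 *	static const struct intel_global_state_funcs foo_funcs = {
 *		.atomic_duplicate_state = foo_duplicate_state,
 *		.atomic_destroy_state = foo_destroy_state,
 *	};
 *
 *	intel_atomic_global_obj_init(i915, &i915->display.foo.obj,
 *				     &foo_state->base, &foo_funcs);
 */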
void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
				  struct intel_global_obj *obj,
				  struct intel_global_state *state,
				  const struct intel_global_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	state->obj = obj;

	kref_init(&state->ref);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev_priv->display.global.obj_list);
}

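/*
 * Remove all global objects from the device and drop the final
 * reference on their current states. Nothing else may still hold a
 * reference at this point, hence the WARN on the kref count.
 */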
void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
{
	struct intel_global_obj *obj, *next;

	list_for_each_entry_safe(obj, next, &dev_priv->display.global.obj_list, head) {
		list_del(&obj->head);

		drm_WARN_ON(&dev_priv->drm, kref_read(&obj->state->ref) != 1);
		intel_atomic_global_state_put(obj->state);
	}
}

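/*
 * Write access to global state requires holding *all* CRTC locks, so
 * that no other commit can race with the update.
 */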
static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		drm_modeset_lock_assert_held(&crtc->base.mutex);
}

static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
				 struct drm_modeset_lock *lock)
{
	struct drm_modeset_lock *l;

	list_for_each_entry(l, &ctx->locked, head) {
		if (lock == l)
			return true;
	}

	return false;
}

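/*
 * Read access only requires holding at least one CRTC lock; since a
 * writer must hold all of them, that is enough to exclude concurrent
 * writers.
 */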
static void assert_global_state_read_locked(struct intel_atomic_state *state)
{
	struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (modeset_lock_is_held(ctx, &crtc->base.mutex))
			return;
	}

	drm_WARN(&dev_priv->drm, 1, "Global state not read locked\n");
}

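/*
 * Add the global object's state to the atomic state, duplicating the
 * object's current state on first use. Subsequent calls for the same
 * object within the same atomic state return the already duplicated
 * state.
 */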
struct intel_global_state *
intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
				  struct intel_global_obj *obj)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	int index, num_objs, i;
	size_t size;
	struct __intel_global_objs_state *arr;
	struct intel_global_state *obj_state;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].state;

	assert_global_state_read_locked(state);

	num_objs = state->num_global_objs + 1;
	size = sizeof(*state->global_objs) * num_objs;
	arr = krealloc(state->global_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->global_objs = arr;
	index = state->num_global_objs;
	memset(&state->global_objs[index], 0, sizeof(*state->global_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	obj_state->obj = obj;
	obj_state->changed = false;
	obj_state->serialized = false;
	obj_state->commit = NULL;

	kref_init(&obj_state->ref);

	state->global_objs[index].state = obj_state;
	state->global_objs[index].old_state =
		intel_atomic_global_state_get(obj->state);
	state->global_objs[index].new_state = obj_state;
	state->global_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_global_objs = num_objs;

	drm_dbg_atomic(&i915->drm, "Added new global object %p state %p to %p\n",
		       obj, obj_state, state);

	return obj_state;
}

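/*
 * Return the old state of the global object, or NULL if the object is
 * not part of the atomic state.
 */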
struct intel_global_state *
intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].old_state;

	return NULL;
}

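/*
 * Return the new state of the global object, or NULL if the object is
 * not part of the atomic state.
 */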
struct intel_global_state *
intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
				      struct intel_global_obj *obj)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++)
		if (obj == state->global_objs[i].ptr)
			return state->global_objs[i].new_state;

	return NULL;
}

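/*
 * Swap the newly computed global states into the objects, making them
 * current. The atomic state keeps references to the old states so
 * that they stay valid until the commit is torn down.
 */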
void intel_atomic_swap_global_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_global_state *old_obj_state, *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		drm_WARN_ON(&dev_priv->drm, obj->state != old_obj_state);

		/*
		 * If the new state wasn't modified (and properly
		 * locked for write access) we throw it away.
		 */
		if (!new_obj_state->changed)
			continue;

		assert_global_state_write_locked(dev_priv);

		old_obj_state->state = state;
		new_obj_state->state = NULL;

		state->global_objs[i].state = old_obj_state;

		intel_atomic_global_state_put(obj->state);
		obj->state = intel_atomic_global_state_get(new_obj_state);
	}
}

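/*
 * Drop the references the atomic state holds on all of its old/new
 * global object states, typically when the atomic state itself is
 * being cleared.
 */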
void intel_atomic_clear_global_state(struct intel_atomic_state *state)
{
	int i;

	for (i = 0; i < state->num_global_objs; i++) {
		intel_atomic_global_state_put(state->global_objs[i].old_state);
		intel_atomic_global_state_put(state->global_objs[i].new_state);

		state->global_objs[i].ptr = NULL;
		state->global_objs[i].state = NULL;
		state->global_objs[i].old_state = NULL;
		state->global_objs[i].new_state = NULL;
	}
	state->num_global_objs = 0;
}

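/*
 * Grab every CRTC lock for write access to the global state, and mark
 * the state as changed so that intel_atomic_swap_global_state() will
 * swap it in.
 */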
int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
{
	struct intel_atomic_state *state = obj_state->state;
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		int ret;

		ret = drm_modeset_lock(&crtc->base.mutex,
				       state->base.acquire_ctx);
		if (ret)
			return ret;
	}

	obj_state->changed = true;

	return 0;
}

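/*
 * As intel_atomic_lock_global_state(), but additionally serialize
 * against other commits: later commits depending on this state must
 * wait for this commit's hardware reprogramming to finish first.
 */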
int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
{
	int ret;

	ret = intel_atomic_lock_global_state(obj_state);
	if (ret)
		return ret;

	obj_state->serialized = true;

	return 0;
}

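/*
 * The global state is serialized iff the atomic state contains every
 * CRTC, since that implies all the CRTC locks are held.
 */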
bool
intel_atomic_global_state_is_serialized(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc)
		if (!intel_atomic_get_new_crtc_state(state, crtc))
			return false;
	return true;
}

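/*
 * Set up the commit trackers: a serialized state gets a fresh commit,
 * to be completed once the hardware reprogramming is done, while a
 * merely changed state carries the previous commit along in case that
 * one has not finished yet.
 */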
int
intel_atomic_global_state_setup_commit(struct intel_atomic_state *state)
{
	const struct intel_global_state *old_obj_state;
	struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
					    new_obj_state, i) {
		struct intel_global_commit *commit = NULL;

		if (new_obj_state->serialized) {
			/*
			 * New commit which is going to be completed
			 * after the hardware reprogramming is done.
			 */
			commit = commit_new();
			if (!commit)
				return -ENOMEM;
		} else if (new_obj_state->changed) {
			/*
			 * We're going to swap to this state, so carry the
			 * previous commit along, in case it's not yet done.
			 */
			commit = commit_get(old_obj_state->commit);
		}

		new_obj_state->commit = commit;
	}

	return 0;
}

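/*
 * Wait (with a 10 second timeout) for the commits that serialized the
 * global states we depend on to finish their hardware reprogramming.
 */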
int
intel_atomic_global_state_wait_for_dependencies(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_global_state *old_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_old_global_obj_in_state(state, obj, old_obj_state, i) {
		struct intel_global_commit *commit = old_obj_state->commit;
		long ret;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->done, 10 * HZ);
		if (ret == 0) {
			drm_err(&i915->drm, "global state timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

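/*
 * Signal that the hardware reprogramming for this commit is done,
 * waking anyone waiting in
 * intel_atomic_global_state_wait_for_dependencies().
 */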
void
intel_atomic_global_state_commit_done(struct intel_atomic_state *state)
{
	const struct intel_global_state *new_obj_state;
	struct intel_global_obj *obj;
	int i;

	for_each_new_global_obj_in_state(state, obj, new_obj_state, i) {
		struct intel_global_commit *commit = new_obj_state->commit;

		if (!new_obj_state->serialized)
			continue;

		complete_all(&commit->done);
	}
}