// SPDX-License-Identifier: MIT
/*
 * Copyright 2026 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/types.h>
#include <drm/drm_vblank.h>

#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_ism.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_trace.h"

/**
 * dm_ism_next_state - Get next state based on current state and event
 * @current_state: current ISM state
 * @event: event being processed
 * @next_state: place to store the next state
 *
 * This function defines the idle state management FSM. Invalid transitions
 * are ignored and will not progress the FSM.
 *
 * Return: true if the (state, event) pair is a valid transition and
 * *next_state was written, false otherwise.
 */
static bool dm_ism_next_state(enum amdgpu_dm_ism_state current_state,
			      enum amdgpu_dm_ism_event event,
			      enum amdgpu_dm_ism_state *next_state)
{
	switch (STATE_EVENT(current_state, event)) {
	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING,
			 DM_ISM_EVENT_ENTER_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_HYSTERESIS_WAITING;
		break;
	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING,
			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_FULL_POWER_BUSY;
		break;

	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY,
			 DM_ISM_EVENT_ENTER_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
		break;
	case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY,
			 DM_ISM_EVENT_END_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
		break;

	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_TIMER_ABORTED;
		break;
	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
		break;
	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
			 DM_ISM_EVENT_TIMER_ELAPSED):
		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE;
		break;
	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING,
			 DM_ISM_EVENT_IMMEDIATE):
		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE;
		break;

	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY,
			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_FULL_POWER_BUSY;
		break;
	case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY,
			 DM_ISM_EVENT_END_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_HYSTERESIS_WAITING;
		break;

	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
		break;
	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
		break;
	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
			 DM_ISM_EVENT_SSO_TIMER_ELAPSED):
	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE,
			 DM_ISM_EVENT_IMMEDIATE):
		*next_state = DM_ISM_STATE_OPTIMIZED_IDLE_SSO;
		break;

	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO,
			 DM_ISM_EVENT_EXIT_IDLE_REQUESTED):
		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
		break;
	case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO,
			 DM_ISM_EVENT_BEGIN_CURSOR_UPDATE):
		*next_state = DM_ISM_STATE_HYSTERESIS_BUSY;
		break;

	case STATE_EVENT(DM_ISM_STATE_TIMER_ABORTED,
			 DM_ISM_EVENT_IMMEDIATE):
		*next_state = DM_ISM_STATE_FULL_POWER_RUNNING;
		break;

	default:
		return false;
	}
	return true;
}
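
/*
 * Illustrative walk through the table above (comment only; an assumed
 * typical flow): an uninterrupted enter-idle sequence moves through
 *
 *	FULL_POWER_RUNNING --ENTER_IDLE_REQUESTED--> HYSTERESIS_WAITING
 *	HYSTERESIS_WAITING --TIMER_ELAPSED---------> OPTIMIZED_IDLE
 *	OPTIMIZED_IDLE -----SSO_TIMER_ELAPSED------> OPTIMIZED_IDLE_SSO
 *
 * while an EXIT_IDLE_REQUESTED during the hysteresis wait detours through
 * TIMER_ABORTED before returning to FULL_POWER_RUNNING.
 */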

static uint64_t dm_ism_get_sso_delay(const struct amdgpu_dm_ism *ism,
				     const struct dc_stream_state *stream)
{
	const struct amdgpu_dm_ism_config *config = &ism->config;
	uint32_t v_total, h_total;
	uint64_t one_frame_ns, sso_delay_ns;

	if (!stream)
		return 0;

	if (!config->sso_num_frames)
		return 0;

	v_total = stream->timing.v_total;
	h_total = stream->timing.h_total;

	one_frame_ns = div64_u64(v_total * h_total * 10000000ull,
				 stream->timing.pix_clk_100hz);
	sso_delay_ns = config->sso_num_frames * one_frame_ns;

	return sso_delay_ns;
}
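
/*
 * Worked example for the math above (illustrative numbers): a 1080p@60 CEA
 * timing has h_total = 2200, v_total = 1125 and pix_clk_100hz = 1485000
 * (148.5 MHz), so one_frame_ns = 2200 * 1125 * 10^7 / 1485000 ~= 16666666,
 * i.e. ~16.67 ms per frame; an assumed sso_num_frames of 4 would then give
 * an SSO delay of ~66.7 ms.
 */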

/**
 * dm_ism_get_idle_allow_delay - Calculate hysteresis-based idle allow delay
 * @ism: ISM instance containing configuration, history, and current state
 * @stream: display stream used to derive frame timing values for delay
 *
 * Calculates the delay before allowing idle optimizations based on recent
 * idle history and the current stream timing.
 *
 * Return: delay in nanoseconds, or 0 if idle may be allowed immediately.
 */
static uint64_t dm_ism_get_idle_allow_delay(const struct amdgpu_dm_ism *ism,
					    const struct dc_stream_state *stream)
{
	const struct amdgpu_dm_ism_config *config = &ism->config;
	uint32_t v_total, h_total;
	uint64_t one_frame_ns, short_idle_ns, old_hist_ns;
	uint32_t history_size;
	int pos;
	uint32_t short_idle_count = 0;
	uint64_t ret_ns = 0;

	if (!stream)
		return 0;

	if (!config->filter_num_frames)
		return 0;
	if (!config->filter_entry_count)
		return 0;
	if (!config->activation_num_delay_frames)
		return 0;

	v_total = stream->timing.v_total;
	h_total = stream->timing.h_total;

	one_frame_ns = div64_u64(v_total * h_total * 10000000ull,
				 stream->timing.pix_clk_100hz);

	short_idle_ns = config->filter_num_frames * one_frame_ns;
	old_hist_ns = config->filter_old_history_threshold * one_frame_ns;

	/*
	 * Look back into the recent history and count how many times we
	 * entered the idle power state for only a short duration.
	 */
	history_size = min(
		max(config->filter_history_size, config->filter_entry_count),
		AMDGPU_DM_IDLE_HIST_LEN);
	pos = ism->next_record_idx;

	for (int k = 0; k < history_size; k++) {
		if (pos <= 0 || pos > AMDGPU_DM_IDLE_HIST_LEN)
			pos = AMDGPU_DM_IDLE_HIST_LEN;
		pos -= 1;

		if (ism->records[pos].duration_ns <= short_idle_ns)
			short_idle_count += 1;

		if (short_idle_count >= config->filter_entry_count)
			break;

		if (old_hist_ns > 0 &&
		    ism->last_idle_timestamp_ns - ism->records[pos].timestamp_ns > old_hist_ns)
			break;
	}

	if (short_idle_count >= config->filter_entry_count)
		ret_ns = config->activation_num_delay_frames * one_frame_ns;

	return ret_ns;
}
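
/*
 * Filter example (assumed config values, not defaults): with
 * filter_num_frames = 60, filter_entry_count = 3 and
 * activation_num_delay_frames = 120, three recent idle periods that each
 * lasted less than 60 frames worth of time make the next idle entry wait an
 * extra 120 frames; otherwise the function returns 0 and idle may be
 * allowed immediately.
 */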

/**
 * dm_ism_insert_record - Insert a record into the circular history buffer
 * @ism: ISM instance
 */
static void dm_ism_insert_record(struct amdgpu_dm_ism *ism)
{
	struct amdgpu_dm_ism_record *record;

	if (ism->next_record_idx < 0 ||
	    ism->next_record_idx >= AMDGPU_DM_IDLE_HIST_LEN)
		ism->next_record_idx = 0;

	record = &ism->records[ism->next_record_idx];
	ism->next_record_idx += 1;

	record->timestamp_ns = ktime_get_ns();
	record->duration_ns =
		record->timestamp_ns - ism->last_idle_timestamp_ns;
}

static void dm_ism_set_last_idle_ts(struct amdgpu_dm_ism *ism)
{
	ism->last_idle_timestamp_ns = ktime_get_ns();
}

static bool dm_ism_trigger_event(struct amdgpu_dm_ism *ism,
				 enum amdgpu_dm_ism_event event)
{
	enum amdgpu_dm_ism_state next_state;
	bool got_next_state = dm_ism_next_state(ism->current_state, event,
						&next_state);

	if (got_next_state) {
		ism->previous_state = ism->current_state;
		ism->current_state = next_state;
	}

	return got_next_state;
}

static void dm_ism_commit_idle_optimization_state(struct amdgpu_dm_ism *ism,
						  struct dc_stream_state *stream,
						  bool vblank_enabled,
						  bool allow_panel_sso)
{
	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	trace_amdgpu_dm_ism_commit(dm->active_vblank_irq_count,
				   vblank_enabled,
				   allow_panel_sso);

	/*
	 * If there is an active vblank requestor, or if SSO is being engaged,
	 * disallow idle optimizations.
	 */
	if (vblank_enabled || allow_panel_sso)
		dc_allow_idle_optimizations(dm->dc, false);

	/*
	 * Control PSR based on vblank requirements from the OS.
	 *
	 * If the panel supports PSR SU/Replay, there is no need to exit
	 * self-refresh when the OS is submitting fast atomic commits, as they
	 * can allow self-refresh during vblank periods.
	 */
	if (stream && stream->link) {
		/*
		 * If allow_panel_sso is true when disabling vblank, allow
		 * deeper panel sleep states such as PSR1 and Replay static
		 * screen optimization.
		 */
		if (!vblank_enabled && allow_panel_sso) {
			amdgpu_dm_crtc_set_panel_sr_feature(
				dm, acrtc, stream, false,
				acrtc->dm_irq_params.allow_sr_entry);
		} else if (vblank_enabled) {
			/* Make sure to exit SSO on vblank enable */
			amdgpu_dm_crtc_set_panel_sr_feature(
				dm, acrtc, stream, true,
				acrtc->dm_irq_params.allow_sr_entry);
		}
		/*
		 * Else, vblank_enabled == false and allow_panel_sso == false;
		 * do nothing here.
		 */
	}

	/*
	 * Check for any active drm vblank requestors on other CRTCs
	 * (dm->active_vblank_irq_count) before allowing HW-wide idle
	 * optimizations.
	 *
	 * There is no need for a "balanced" check when disallowing idle
	 * optimizations at the start of this function -- we should disallow
	 * whenever there is *an* active CRTC.
	 */
	if (!vblank_enabled && dm->active_vblank_irq_count == 0) {
		dc_post_update_surfaces_to_stream(dm->dc);
		dc_allow_idle_optimizations(dm->dc, true);
	}
}
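
/*
 * Net effect of the (vblank_enabled, allow_panel_sso) combinations above:
 *
 *	(true,  *)     - idle optimizations disallowed; panel SR exited
 *	(false, true)  - deep panel SR (SSO) enabled; idle optimizations
 *	                 briefly disallowed around the SR update, then
 *	                 re-allowed if no CRTC holds a vblank reference
 *	(false, false) - panel SR untouched; idle optimizations allowed if
 *	                 no CRTC holds a vblank reference
 */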

static enum amdgpu_dm_ism_event dm_ism_dispatch_power_state(
	struct amdgpu_dm_ism *ism,
	struct dm_crtc_state *acrtc_state,
	enum amdgpu_dm_ism_event event)
{
	enum amdgpu_dm_ism_event ret = event;
	const struct amdgpu_dm_ism_config *config = &ism->config;
	uint64_t delay_ns, sso_delay_ns;

	switch (ism->previous_state) {
	case DM_ISM_STATE_HYSTERESIS_WAITING:
		/*
		 * Stop the timer if it was set and we are not running from
		 * the idle allow worker.
		 */
		if (ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE &&
		    ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE_SSO)
			cancel_delayed_work(&ism->delayed_work);
		break;
	case DM_ISM_STATE_OPTIMIZED_IDLE:
		if (ism->current_state == DM_ISM_STATE_OPTIMIZED_IDLE_SSO)
			break;
		/* Idle is being disallowed: cancel SSO work and insert a record */
		cancel_delayed_work(&ism->sso_delayed_work);
		dm_ism_insert_record(ism);
		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
						      true, false);
		break;
	case DM_ISM_STATE_OPTIMIZED_IDLE_SSO:
		/* Disable idle optimization */
		dm_ism_insert_record(ism);
		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
						      true, false);
		break;
	default:
		break;
	}

	switch (ism->current_state) {
	case DM_ISM_STATE_HYSTERESIS_WAITING:
		dm_ism_set_last_idle_ts(ism);

		/* The CRTC can be disabled; allow immediate idle */
		if (!acrtc_state->stream) {
			ret = DM_ISM_EVENT_IMMEDIATE;
			break;
		}

		delay_ns = dm_ism_get_idle_allow_delay(ism,
						       acrtc_state->stream);
		if (delay_ns == 0) {
			ret = DM_ISM_EVENT_IMMEDIATE;
			break;
		}

		/* Schedule worker */
		mod_delayed_work(system_unbound_wq, &ism->delayed_work,
				 nsecs_to_jiffies(delay_ns));

		break;
	case DM_ISM_STATE_OPTIMIZED_IDLE:
		sso_delay_ns = dm_ism_get_sso_delay(ism, acrtc_state->stream);
		if (sso_delay_ns == 0) {
			ret = DM_ISM_EVENT_IMMEDIATE;
		} else if (config->sso_num_frames < config->filter_num_frames) {
			/*
			 * If sso_num_frames is less than the hysteresis
			 * frames, allowing idle here and then disallowing it
			 * after sso_num_frames has expired will likely have a
			 * negative power impact. Skip the idle allow here and
			 * let sso_delayed_work handle it.
			 */
			mod_delayed_work(system_unbound_wq,
					 &ism->sso_delayed_work,
					 nsecs_to_jiffies(sso_delay_ns));
		} else {
			/* Enable idle optimization without SSO */
			dm_ism_commit_idle_optimization_state(
				ism, acrtc_state->stream, false, false);
			mod_delayed_work(system_unbound_wq,
					 &ism->sso_delayed_work,
					 nsecs_to_jiffies(sso_delay_ns));
		}
		break;
	case DM_ISM_STATE_OPTIMIZED_IDLE_SSO:
		/* Enable static screen optimizations. */
		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
						      false, true);
		break;
	case DM_ISM_STATE_TIMER_ABORTED:
		dm_ism_insert_record(ism);
		dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream,
						      true, false);
		ret = DM_ISM_EVENT_IMMEDIATE;
		break;
	default:
		break;
	}

	return ret;
}

static const char * const dm_ism_events_str[DM_ISM_NUM_EVENTS] = {
	[DM_ISM_EVENT_IMMEDIATE] = "IMMEDIATE",
	[DM_ISM_EVENT_ENTER_IDLE_REQUESTED] = "ENTER_IDLE_REQUESTED",
	[DM_ISM_EVENT_EXIT_IDLE_REQUESTED] = "EXIT_IDLE_REQUESTED",
	[DM_ISM_EVENT_BEGIN_CURSOR_UPDATE] = "BEGIN_CURSOR_UPDATE",
	[DM_ISM_EVENT_END_CURSOR_UPDATE] = "END_CURSOR_UPDATE",
	[DM_ISM_EVENT_TIMER_ELAPSED] = "TIMER_ELAPSED",
	[DM_ISM_EVENT_SSO_TIMER_ELAPSED] = "SSO_TIMER_ELAPSED",
};

static const char * const dm_ism_states_str[DM_ISM_NUM_STATES] = {
	[DM_ISM_STATE_FULL_POWER_RUNNING] = "FULL_POWER_RUNNING",
	[DM_ISM_STATE_FULL_POWER_BUSY] = "FULL_POWER_BUSY",
	[DM_ISM_STATE_HYSTERESIS_WAITING] = "HYSTERESIS_WAITING",
	[DM_ISM_STATE_HYSTERESIS_BUSY] = "HYSTERESIS_BUSY",
	[DM_ISM_STATE_OPTIMIZED_IDLE] = "OPTIMIZED_IDLE",
	[DM_ISM_STATE_OPTIMIZED_IDLE_SSO] = "OPTIMIZED_IDLE_SSO",
	[DM_ISM_STATE_TIMER_ABORTED] = "TIMER_ABORTED",
};

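/**
 * amdgpu_dm_ism_commit_event - Commit an event to the ISM and run the FSM
 * @ism: ISM instance
 * @event: event to commit
 *
 * Applies @event to the FSM and dispatches the resulting power state,
 * repeating while the dispatcher chains follow-up events. Must be called
 * with dm->dc_lock held.
 */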
void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism,
				enum amdgpu_dm_ism_event event)
{
	enum amdgpu_dm_ism_event next_event = event;
	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(acrtc->base.state);

	/* ISM transitions must be called with the mutex acquired */
	ASSERT(mutex_is_locked(&dm->dc_lock));

	/* ISM should not run after dc is destroyed */
	ASSERT(dm->dc);

	if (!acrtc_state) {
		trace_amdgpu_dm_ism_event(acrtc->crtc_id, "NO_STATE",
					  "NO_STATE", "N/A");
		return;
	}

	do {
		bool transition = dm_ism_trigger_event(ism, event);

		next_event = DM_ISM_NUM_EVENTS;
		if (transition) {
			trace_amdgpu_dm_ism_event(
				acrtc->crtc_id,
				dm_ism_states_str[ism->previous_state],
				dm_ism_states_str[ism->current_state],
				dm_ism_events_str[event]);
			next_event = dm_ism_dispatch_power_state(
				ism, acrtc_state, next_event);
		} else {
			trace_amdgpu_dm_ism_event(
				acrtc->crtc_id,
				dm_ism_states_str[ism->current_state],
				dm_ism_states_str[ism->current_state],
				dm_ism_events_str[event]);
		}

		event = next_event;
	} while (next_event < DM_ISM_NUM_EVENTS);
}
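
/*
 * Event chaining example (illustrative): committing ENTER_IDLE_REQUESTED on
 * a CRTC without a stream transitions FULL_POWER_RUNNING ->
 * HYSTERESIS_WAITING, the dispatcher returns DM_ISM_EVENT_IMMEDIATE, and
 * the loop keeps committing IMMEDIATE events until the FSM settles in
 * OPTIMIZED_IDLE_SSO, with no timers involved.
 */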

static void dm_ism_delayed_work_func(struct work_struct *work)
{
	struct amdgpu_dm_ism *ism =
		container_of(work, struct amdgpu_dm_ism, delayed_work.work);
	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	guard(mutex)(&dm->dc_lock);

	amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_TIMER_ELAPSED);
}

static void dm_ism_sso_delayed_work_func(struct work_struct *work)
{
	struct amdgpu_dm_ism *ism =
		container_of(work, struct amdgpu_dm_ism, sso_delayed_work.work);
	struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism);
	struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	guard(mutex)(&dm->dc_lock);

	amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_SSO_TIMER_ELAPSED);
}

/**
 * amdgpu_dm_ism_disable - Disable the ISM
 *
 * @dm: The amdgpu display manager
 *
 * Disable the idle state manager by disabling any ISM work, canceling
 * pending work, and waiting for in-progress work to finish. After disabling,
 * the system is left in the DM_ISM_STATE_FULL_POWER_RUNNING state.
 */
void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm)
{
	struct drm_crtc *crtc;
	struct amdgpu_crtc *acrtc;
	struct amdgpu_dm_ism *ism;

	ASSERT(mutex_is_locked(&dm->dc_lock));

	drm_for_each_crtc(crtc, dm->ddev) {
		acrtc = to_amdgpu_crtc(crtc);
		ism = &acrtc->ism;

		/* Cancel and disable any pending work */
		disable_delayed_work_sync(&ism->delayed_work);
		disable_delayed_work_sync(&ism->sso_delayed_work);

		/*
		 * When disabled, leave the FSM in the FULL_POWER_RUNNING
		 * state. EXIT_IDLE will not queue any work.
		 */
		amdgpu_dm_ism_commit_event(ism,
					   DM_ISM_EVENT_EXIT_IDLE_REQUESTED);
	}
}

/**
 * amdgpu_dm_ism_enable - Enable the ISM
 *
 * @dm: The amdgpu display manager
 *
 * Re-enable the idle state manager by enabling work that was disabled by
 * amdgpu_dm_ism_disable().
 */
void amdgpu_dm_ism_enable(struct amdgpu_display_manager *dm)
{
	struct drm_crtc *crtc;
	struct amdgpu_crtc *acrtc;
	struct amdgpu_dm_ism *ism;

	drm_for_each_crtc(crtc, dm->ddev) {
		acrtc = to_amdgpu_crtc(crtc);
		ism = &acrtc->ism;

		enable_delayed_work(&ism->delayed_work);
		enable_delayed_work(&ism->sso_delayed_work);
	}
}
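
/*
 * Typical pairing (sketch only; the surrounding suspend/resume path is
 * assumed, not taken from this file):
 *
 *	mutex_lock(&dm->dc_lock);
 *	amdgpu_dm_ism_disable(dm);   (parks every CRTC in FULL_POWER_RUNNING)
 *	...reprogram hardware...
 *	amdgpu_dm_ism_enable(dm);    (re-arms the disabled delayed work)
 *	mutex_unlock(&dm->dc_lock);
 */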

/**
 * amdgpu_dm_ism_init - Initialize an ISM instance
 * @ism: ISM instance to initialize
 * @config: configuration to copy into @ism
 *
 * Resets the FSM to DM_ISM_STATE_FULL_POWER_RUNNING and initializes the
 * delayed work items.
 */
void amdgpu_dm_ism_init(struct amdgpu_dm_ism *ism,
			struct amdgpu_dm_ism_config *config)
{
	ism->config = *config;

	ism->current_state = DM_ISM_STATE_FULL_POWER_RUNNING;
	ism->previous_state = DM_ISM_STATE_FULL_POWER_RUNNING;
	ism->next_record_idx = 0;
	ism->last_idle_timestamp_ns = 0;

	INIT_DELAYED_WORK(&ism->delayed_work, dm_ism_delayed_work_func);
	INIT_DELAYED_WORK(&ism->sso_delayed_work, dm_ism_sso_delayed_work_func);
}
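
/*
 * Minimal init sketch (field values are illustrative assumptions, not
 * driver defaults):
 *
 *	struct amdgpu_dm_ism_config config = {
 *		.sso_num_frames = 4,
 *		.filter_num_frames = 60,
 *		.filter_entry_count = 3,
 *		.filter_history_size = 8,
 *		.filter_old_history_threshold = 600,
 *		.activation_num_delay_frames = 120,
 *	};
 *
 *	amdgpu_dm_ism_init(&acrtc->ism, &config);
 */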

/**
 * amdgpu_dm_ism_fini - Tear down an ISM instance
 * @ism: ISM instance
 *
 * Cancels any pending delayed work and waits for in-flight work to finish.
 */
void amdgpu_dm_ism_fini(struct amdgpu_dm_ism *ism)
{
	cancel_delayed_work_sync(&ism->sso_delayed_work);
	cancel_delayed_work_sync(&ism->delayed_work);
}