xref: /linux/drivers/gpu/drm/i915/display/intel_dbuf_bw.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2025 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 
8 #include "intel_dbuf_bw.h"
9 #include "intel_display_core.h"
10 #include "intel_display_types.h"
11 #include "skl_watermark.h"
12 
/*
 * Per-pipe dbuf bandwidth bookkeeping, tracked per dbuf slice:
 * the highest data rate of any plane using the slice, and which
 * planes are allocated from it.
 */
struct intel_dbuf_bw {
	/* highest plane data rate seen on each slice (same units as crtc_state->data_rate[]) */
	unsigned int max_bw[I915_MAX_DBUF_SLICES];
	/* bitmask of enum plane_id for planes whose ddb allocation touches the slice */
	u8 active_planes[I915_MAX_DBUF_SLICES];
};
17 
/*
 * Global atomic state object holding the dbuf bandwidth
 * requirements of every pipe.
 */
struct intel_dbuf_bw_state {
	struct intel_global_state base;
	/* per-pipe dbuf bandwidth demands, indexed by enum pipe */
	struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
};
22 
23 struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state)
24 {
25 	return container_of(obj_state, struct intel_dbuf_bw_state, base);
26 }
27 
28 struct intel_dbuf_bw_state *
29 intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state)
30 {
31 	struct intel_display *display = to_intel_display(state);
32 	struct intel_global_state *dbuf_bw_state;
33 
34 	dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj);
35 
36 	return to_intel_dbuf_bw_state(dbuf_bw_state);
37 }
38 
39 struct intel_dbuf_bw_state *
40 intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state)
41 {
42 	struct intel_display *display = to_intel_display(state);
43 	struct intel_global_state *dbuf_bw_state;
44 
45 	dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj);
46 
47 	return to_intel_dbuf_bw_state(dbuf_bw_state);
48 }
49 
50 struct intel_dbuf_bw_state *
51 intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state)
52 {
53 	struct intel_display *display = to_intel_display(state);
54 	struct intel_global_state *dbuf_bw_state;
55 
56 	dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj);
57 	if (IS_ERR(dbuf_bw_state))
58 		return ERR_CAST(dbuf_bw_state);
59 
60 	return to_intel_dbuf_bw_state(dbuf_bw_state);
61 }
62 
63 static bool intel_dbuf_bw_changed(struct intel_display *display,
64 				  const struct intel_dbuf_bw *old_dbuf_bw,
65 				  const struct intel_dbuf_bw *new_dbuf_bw)
66 {
67 	enum dbuf_slice slice;
68 
69 	for_each_dbuf_slice(display, slice) {
70 		if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
71 		    old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
72 			return true;
73 	}
74 
75 	return false;
76 }
77 
78 static bool intel_dbuf_bw_state_changed(struct intel_display *display,
79 					const struct intel_dbuf_bw_state *old_dbuf_bw_state,
80 					const struct intel_dbuf_bw_state *new_dbuf_bw_state)
81 {
82 	enum pipe pipe;
83 
84 	for_each_pipe(display, pipe) {
85 		const struct intel_dbuf_bw *old_dbuf_bw =
86 			&old_dbuf_bw_state->dbuf_bw[pipe];
87 		const struct intel_dbuf_bw *new_dbuf_bw =
88 			&new_dbuf_bw_state->dbuf_bw[pipe];
89 
90 		if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
91 			return true;
92 	}
93 
94 	return false;
95 }
96 
97 static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
98 				   struct intel_crtc *crtc,
99 				   enum plane_id plane_id,
100 				   const struct skl_ddb_entry *ddb,
101 				   unsigned int data_rate)
102 {
103 	struct intel_display *display = to_intel_display(crtc);
104 	unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
105 	enum dbuf_slice slice;
106 
107 	/*
108 	 * The arbiter can only really guarantee an
109 	 * equal share of the total bw to each plane.
110 	 */
111 	for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
112 		dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
113 		dbuf_bw->active_planes[slice] |= BIT(plane_id);
114 	}
115 }
116 
117 static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
118 				  const struct intel_crtc_state *crtc_state)
119 {
120 	struct intel_display *display = to_intel_display(crtc_state);
121 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
122 	enum plane_id plane_id;
123 
124 	memset(dbuf_bw, 0, sizeof(*dbuf_bw));
125 
126 	if (!crtc_state->hw.active)
127 		return;
128 
129 	for_each_plane_id_on_crtc(crtc, plane_id) {
130 		/*
131 		 * We assume cursors are small enough
132 		 * to not cause bandwidth problems.
133 		 */
134 		if (plane_id == PLANE_CURSOR)
135 			continue;
136 
137 		skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
138 				       &crtc_state->wm.skl.plane_ddb[plane_id],
139 				       crtc_state->data_rate[plane_id]);
140 
141 		if (DISPLAY_VER(display) < 11)
142 			skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
143 					       &crtc_state->wm.skl.plane_ddb_y[plane_id],
144 					       crtc_state->data_rate[plane_id]);
145 	}
146 }
147 
/*
 * "Maximum Data Buffer Bandwidth"
 *
 * Compute the minimum cdclk required to satisfy the worst-case
 * dbuf slice bandwidth demand across all pipes, given the per-pipe
 * demands recorded in @dbuf_bw_state.
 */
int intel_dbuf_bw_min_cdclk(struct intel_display *display,
			    const struct intel_dbuf_bw_state *dbuf_bw_state)
{
	unsigned int total_max_bw = 0;
	enum dbuf_slice slice;

	for_each_dbuf_slice(display, slice) {
		int num_active_planes = 0;
		unsigned int max_bw = 0;
		enum pipe pipe;

		/*
		 * The arbiter can only really guarantee an
		 * equal share of the total bw to each plane.
		 */
		for_each_pipe(display, pipe) {
			const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe];

			/* worst single plane on this slice, across all pipes */
			max_bw = max(dbuf_bw->max_bw[slice], max_bw);
			num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
		}
		/* equal-share arbitration: slice must sustain worst plane x plane count */
		max_bw *= num_active_planes;

		total_max_bw = max(total_max_bw, max_bw);
	}

	/* presumably 64 bytes transferred per cdclk cycle -- TODO confirm against bspec */
	return DIV_ROUND_UP(total_max_bw, 64);
}
177 
/*
 * Recompute the dbuf bandwidth demand of every crtc in @state and,
 * if any demand changed, update the global dbuf bw state and pass
 * the old/new min cdclk requirements on to the cdclk code, which
 * sets *@need_cdclk_calc when a full cdclk recomputation is needed.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
				 bool *need_cdclk_calc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL;
	const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int ret, i;

	/* dbuf bw tracking only exists on skl+ */
	if (DISPLAY_VER(display) < 9)
		return 0;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;

		skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
		skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);

		/* avoid touching the global object if this crtc's demand is unchanged */
		if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
			continue;

		/* first change adds the global obj to the state (and locks it for us) */
		new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
		if (IS_ERR(new_dbuf_bw_state))
			return PTR_ERR(new_dbuf_bw_state);

		old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state);

		new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
	}

	/* no crtc changed its demand -> nothing to do */
	if (!old_dbuf_bw_state)
		return 0;

	/* serialize against other commits if the overall state differs */
	if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) {
		ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base);
		if (ret)
			return ret;
	}

	ret = intel_cdclk_update_dbuf_bw_min_cdclk(state,
						   intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state),
						   intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state),
						   need_cdclk_calc);
	if (ret)
		return ret;

	return 0;
}
229 
230 void intel_dbuf_bw_update_hw_state(struct intel_display *display)
231 {
232 	struct intel_dbuf_bw_state *dbuf_bw_state =
233 		to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
234 	struct intel_crtc *crtc;
235 
236 	if (DISPLAY_VER(display) < 9)
237 		return;
238 
239 	for_each_intel_crtc(display->drm, crtc) {
240 		const struct intel_crtc_state *crtc_state =
241 			to_intel_crtc_state(crtc->base.state);
242 
243 		skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state);
244 	}
245 }
246 
247 void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
248 {
249 	struct intel_display *display = to_intel_display(crtc);
250 	struct intel_dbuf_bw_state *dbuf_bw_state =
251 		to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
252 	enum pipe pipe = crtc->pipe;
253 
254 	if (DISPLAY_VER(display) < 9)
255 		return;
256 
257 	memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe]));
258 }
259 
260 static struct intel_global_state *
261 intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj)
262 {
263 	struct intel_dbuf_bw_state *state;
264 
265 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
266 	if (!state)
267 		return NULL;
268 
269 	return &state->base;
270 }
271 
/* Free a state previously allocated by intel_dbuf_bw_duplicate_state(). */
static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj,
					struct intel_global_state *state)
{
	kfree(state);
}
277 
/* Global state vfuncs for the dbuf bw object. */
static const struct intel_global_state_funcs intel_dbuf_bw_funcs = {
	.atomic_duplicate_state = intel_dbuf_bw_duplicate_state,
	.atomic_destroy_state = intel_dbuf_bw_destroy_state,
};
282 
283 int intel_dbuf_bw_init(struct intel_display *display)
284 {
285 	struct intel_dbuf_bw_state *state;
286 
287 	state = kzalloc(sizeof(*state), GFP_KERNEL);
288 	if (!state)
289 		return -ENOMEM;
290 
291 	intel_atomic_global_obj_init(display, &display->dbuf_bw.obj,
292 				     &state->base, &intel_dbuf_bw_funcs);
293 
294 	return 0;
295 }
296