1 /*
2 * Copyright 2023 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26
27 #include "dcn30/dcn30_hubbub.h"
28 #include "dcn401_hubbub.h"
29 #include "dm_services.h"
30 #include "reg_helper.h"
31
32
33 #define CTX \
34 hubbub2->base.ctx
35 #define DC_LOGGER \
36 hubbub2->base.ctx->logger
37 #define REG(reg)\
38 hubbub2->regs->reg
39
40 #undef FN
41 #define FN(reg_name, field_name) \
42 hubbub2->shifts->field_name, hubbub2->masks->field_name
43
/*
 * dcn401_init_crb - snapshot the current CRB (compressed buffer / DET)
 * allocation from hardware into the hubbub state.
 *
 * Reads back the currently-programmed size of each of the four DET
 * partitions and of the compressed buffer (in segments) so software
 * state matches what the hardware is actually using, then programs the
 * COMPBUF reserved space from the pixel chunk size.
 */
static void dcn401_init_crb(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	/* Cache the live DET partition sizes (per-pipe detile buffers). */
	REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT,
		&hubbub2->det0_size);

	REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT,
		&hubbub2->det1_size);

	REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT,
		&hubbub2->det2_size);

	REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT,
		&hubbub2->det3_size);

	/* Cache the live compressed-buffer size (in segments). */
	REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT,
		&hubbub2->compbuf_size_segments);

	/* Reserved space is expressed in 64-byte units; pixel_chunk_size / 32
	 * yields the unit count (assumes pixel_chunk_size is in KB — TODO
	 * confirm against where pixel_chunk_size is assigned).
	 */
	REG_SET(COMPBUF_RESERVED_SPACE, 0,
			COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32); // 256 64Bytes
}
66
/*
 * hubbub401_program_urgent_watermarks - program the urgency-related
 * arbitration watermarks for clock states A and B.
 *
 * For every watermark, the new value is written to hardware only when it
 * is rising, or when @safe_to_lower permits a decrease; the copy cached
 * in hubbub2->watermarks always mirrors what was actually programmed.
 * A requested decrease that cannot be applied yet is reported as pending.
 *
 * @hubbub:        hubbub instance to program
 * @watermarks:    newly calculated watermark values (register units)
 * @refclk_mhz:    not used by this function
 * @safe_to_lower: true when lowering watermarks is currently safe
 *
 * Return: true if at least one watermark still needs lowering once it
 * becomes safe to do so (programming pending), false otherwise.
 */
bool hubbub401_program_urgent_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* Repeat for water mark set A and B */
	/* clock state A */
	if (safe_to_lower || watermarks->dcn4x.a.urgent > hubbub2->watermarks.dcn4x.a.urgent) {
		hubbub2->watermarks.dcn4x.a.urgent = watermarks->dcn4x.a.urgent;
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, watermarks->dcn4x.a.urgent);
		/* NOTE(review): both fields of this log print the calculated
		 * value; the "HW register value" is not read back.
		 */
		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.a.urgent, watermarks->dcn4x.a.urgent);
	} else if (watermarks->dcn4x.a.urgent < hubbub2->watermarks.dcn4x.a.urgent)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_flip
			> hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip) {
		hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip = watermarks->dcn4x.a.frac_urg_bw_flip;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->dcn4x.a.frac_urg_bw_flip);
	} else if (watermarks->dcn4x.a.frac_urg_bw_flip
			< hubbub2->watermarks.dcn4x.a.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_nom
			> hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom) {
		hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom = watermarks->dcn4x.a.frac_urg_bw_nom;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->dcn4x.a.frac_urg_bw_nom);
	} else if (watermarks->dcn4x.a.frac_urg_bw_nom
			< hubbub2->watermarks.dcn4x.a.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.a.frac_urg_bw_mall
			> hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall) {
		hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall = watermarks->dcn4x.a.frac_urg_bw_mall;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_MALL_A, watermarks->dcn4x.a.frac_urg_bw_mall);
	} else if (watermarks->dcn4x.a.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.a.frac_urg_bw_mall)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem) {
		hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem = watermarks->dcn4x.a.refcyc_per_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, watermarks->dcn4x.a.refcyc_per_trip_to_mem);
	} else if (watermarks->dcn4x.a.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_trip_to_mem)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem) {
		hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, 0,
				DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A, watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem);
	} else if (watermarks->dcn4x.a.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.a.refcyc_per_meta_trip_to_mem)
		wm_pending = true;


	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.urgent > hubbub2->watermarks.dcn4x.b.urgent) {
		hubbub2->watermarks.dcn4x.b.urgent = watermarks->dcn4x.b.urgent;
		REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
				DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, watermarks->dcn4x.b.urgent);
		DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.b.urgent, watermarks->dcn4x.b.urgent);
	} else if (watermarks->dcn4x.b.urgent < hubbub2->watermarks.dcn4x.b.urgent)
		wm_pending = true;

	/* determine the transfer time for a quantity of data for a particular requestor.*/
	if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_flip
			> hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip) {
		hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip = watermarks->dcn4x.b.frac_urg_bw_flip;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->dcn4x.b.frac_urg_bw_flip);
	} else if (watermarks->dcn4x.b.frac_urg_bw_flip
			< hubbub2->watermarks.dcn4x.b.frac_urg_bw_flip)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_nom
			> hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom) {
		hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom = watermarks->dcn4x.b.frac_urg_bw_nom;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->dcn4x.b.frac_urg_bw_nom);
	} else if (watermarks->dcn4x.b.frac_urg_bw_nom
			< hubbub2->watermarks.dcn4x.b.frac_urg_bw_nom)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.b.frac_urg_bw_mall
			> hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall) {
		hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall = watermarks->dcn4x.b.frac_urg_bw_mall;
		REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, 0,
				DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, watermarks->dcn4x.b.frac_urg_bw_mall);
	} else if (watermarks->dcn4x.b.frac_urg_bw_mall < hubbub2->watermarks.dcn4x.b.frac_urg_bw_mall)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem) {
		hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem = watermarks->dcn4x.b.refcyc_per_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, watermarks->dcn4x.b.refcyc_per_trip_to_mem);
	} else if (watermarks->dcn4x.b.refcyc_per_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_trip_to_mem)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem > hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem) {
		hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem = watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem;
		REG_SET(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, 0,
				DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem);
	} else if (watermarks->dcn4x.b.refcyc_per_meta_trip_to_mem < hubbub2->watermarks.dcn4x.b.refcyc_per_meta_trip_to_mem)
		wm_pending = true;

	return wm_pending;
}
184
/*
 * hubbub401_program_stutter_watermarks - program the self-refresh (stutter)
 * enter/exit watermarks for clock states A and B.
 *
 * Each value is written to hardware only when rising or when @safe_to_lower
 * permits a decrease; the cached copy in hubbub2->watermarks mirrors what
 * was actually programmed.  Each accepted value is mirrored into the
 * WATERMARK1/2/3 registers as well (Z states are N/A on dGPU).
 *
 * @refclk_mhz is not used by this function.
 *
 * Return: true if at least one watermark still needs lowering once it
 * becomes safe to do so.
 */
bool hubbub401_program_stutter_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->dcn4x.a.sr_enter
			> hubbub2->watermarks.dcn4x.a.sr_enter) {
		hubbub2->watermarks.dcn4x.a.sr_enter =
				watermarks->dcn4x.a.sr_enter;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, watermarks->dcn4x.a.sr_enter);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.a.sr_enter, watermarks->dcn4x.a.sr_enter);
		// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm A with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, watermarks->dcn4x.a.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, watermarks->dcn4x.a.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, watermarks->dcn4x.a.sr_enter);

	} else if (watermarks->dcn4x.a.sr_enter
			< hubbub2->watermarks.dcn4x.a.sr_enter)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.a.sr_exit
			> hubbub2->watermarks.dcn4x.a.sr_exit) {
		hubbub2->watermarks.dcn4x.a.sr_exit =
				watermarks->dcn4x.a.sr_exit;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, watermarks->dcn4x.a.sr_exit);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.a.sr_exit, watermarks->dcn4x.a.sr_exit);
		// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm A with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, watermarks->dcn4x.a.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, watermarks->dcn4x.a.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, watermarks->dcn4x.a.sr_exit);

	} else if (watermarks->dcn4x.a.sr_exit
			< hubbub2->watermarks.dcn4x.a.sr_exit)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.sr_enter
			> hubbub2->watermarks.dcn4x.b.sr_enter) {
		hubbub2->watermarks.dcn4x.b.sr_enter =
				watermarks->dcn4x.b.sr_enter;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, watermarks->dcn4x.b.sr_enter);
		DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.b.sr_enter, watermarks->dcn4x.b.sr_enter);
		// On dGPU Z states are N/A, so program all other 3 Stutter Enter wm B with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, watermarks->dcn4x.b.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, watermarks->dcn4x.b.sr_enter);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, watermarks->dcn4x.b.sr_enter);

	} else if (watermarks->dcn4x.b.sr_enter
			< hubbub2->watermarks.dcn4x.b.sr_enter)
		wm_pending = true;

	if (safe_to_lower || watermarks->dcn4x.b.sr_exit
			> hubbub2->watermarks.dcn4x.b.sr_exit) {
		hubbub2->watermarks.dcn4x.b.sr_exit =
				watermarks->dcn4x.b.sr_exit;
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, watermarks->dcn4x.b.sr_exit);
		DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n",
			watermarks->dcn4x.b.sr_exit, watermarks->dcn4x.b.sr_exit);
		// On dGPU Z states are N/A, so program all other 3 Stutter Exit wm B with the same value
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, watermarks->dcn4x.b.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, watermarks->dcn4x.b.sr_exit);
		REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, 0,
				DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, watermarks->dcn4x.b.sr_exit);

	} else if (watermarks->dcn4x.b.sr_exit
			< hubbub2->watermarks.dcn4x.b.sr_exit)
		wm_pending = true;

	return wm_pending;
}
282
283
/*
 * hubbub401_program_pstate_watermarks - program UCLK and FCLK p-state
 * change watermarks (and their WATERMARK1 dummy-pstate/temp-read/PPT
 * variants) for clock states A and B.
 *
 * Each value is written to hardware only when rising or when
 * @safe_to_lower permits a decrease; the cached copy in
 * hubbub2->watermarks mirrors what was actually programmed.
 *
 * @refclk_mhz is not used by this function.
 *
 * Return: true if at least one watermark still needs lowering once it
 * becomes safe to do so.
 */
bool hubbub401_program_pstate_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->dcn4x.a.uclk_pstate
			> hubbub2->watermarks.dcn4x.a.uclk_pstate) {
		hubbub2->watermarks.dcn4x.a.uclk_pstate =
				watermarks->dcn4x.a.uclk_pstate;
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.uclk_pstate);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.a.uclk_pstate, watermarks->dcn4x.a.uclk_pstate);
	} else if (watermarks->dcn4x.a.uclk_pstate
			< hubbub2->watermarks.dcn4x.a.uclk_pstate)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.uclk_pstate
			> hubbub2->watermarks.dcn4x.b.uclk_pstate) {
		hubbub2->watermarks.dcn4x.b.uclk_pstate =
				watermarks->dcn4x.b.uclk_pstate;
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.uclk_pstate);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.b.uclk_pstate, watermarks->dcn4x.b.uclk_pstate);
	} else if (watermarks->dcn4x.b.uclk_pstate
			< hubbub2->watermarks.dcn4x.b.uclk_pstate)
		wm_pending = true;

	/* Section for UCLK_PSTATE_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
	if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
			> hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
		hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
				watermarks->dcn4x.a.temp_read_or_ppt;
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
	} else if (watermarks->dcn4x.a.temp_read_or_ppt
			< hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
			> hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
		hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
				watermarks->dcn4x.b.temp_read_or_ppt;
		REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
				DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
		DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK1_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
	} else if (watermarks->dcn4x.b.temp_read_or_ppt
			< hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
		wm_pending = true;

	/* Section for FCLK_PSTATE_CHANGE_WATERMARKS */
	/* clock state A */
	if (safe_to_lower || watermarks->dcn4x.a.fclk_pstate
			> hubbub2->watermarks.dcn4x.a.fclk_pstate) {
		hubbub2->watermarks.dcn4x.a.fclk_pstate =
				watermarks->dcn4x.a.fclk_pstate;
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, watermarks->dcn4x.a.fclk_pstate);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.a.fclk_pstate, watermarks->dcn4x.a.fclk_pstate);
	} else if (watermarks->dcn4x.a.fclk_pstate
			< hubbub2->watermarks.dcn4x.a.fclk_pstate)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.fclk_pstate
			> hubbub2->watermarks.dcn4x.b.fclk_pstate) {
		hubbub2->watermarks.dcn4x.b.fclk_pstate =
				watermarks->dcn4x.b.fclk_pstate;
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, watermarks->dcn4x.b.fclk_pstate);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.b.fclk_pstate, watermarks->dcn4x.b.fclk_pstate);
	} else if (watermarks->dcn4x.b.fclk_pstate
			< hubbub2->watermarks.dcn4x.b.fclk_pstate)
		wm_pending = true;

	/* Section for FCLK_CHANGE_WATERMARKS1 (DUMMY_PSTATE/TEMP_READ/PPT) */
	/* NOTE(review): this section compares against the same cached
	 * temp_read_or_ppt fields that the UCLK WATERMARK1 section above just
	 * updated.  When safe_to_lower is false and the value was rising, the
	 * cached copy now equals the new value, so neither branch below fires
	 * and the FCLK WATERMARK1 registers are not written until a later call
	 * with safe_to_lower — confirm this ordering dependency is intended.
	 */
	if (safe_to_lower || watermarks->dcn4x.a.temp_read_or_ppt
			> hubbub2->watermarks.dcn4x.a.temp_read_or_ppt) {
		hubbub2->watermarks.dcn4x.a.temp_read_or_ppt =
				watermarks->dcn4x.a.temp_read_or_ppt;
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A, watermarks->dcn4x.a.temp_read_or_ppt);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.a.temp_read_or_ppt, watermarks->dcn4x.a.temp_read_or_ppt);
	} else if (watermarks->dcn4x.a.temp_read_or_ppt
			< hubbub2->watermarks.dcn4x.a.temp_read_or_ppt)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.temp_read_or_ppt
			> hubbub2->watermarks.dcn4x.b.temp_read_or_ppt) {
		hubbub2->watermarks.dcn4x.b.temp_read_or_ppt =
				watermarks->dcn4x.b.temp_read_or_ppt;
		REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, 0,
				DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, watermarks->dcn4x.b.temp_read_or_ppt);
		DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK1_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.b.temp_read_or_ppt, watermarks->dcn4x.b.temp_read_or_ppt);
	} else if (watermarks->dcn4x.b.temp_read_or_ppt
			< hubbub2->watermarks.dcn4x.b.temp_read_or_ppt)
		wm_pending = true;

	return wm_pending;
}
409
410
/*
 * hubbub401_program_usr_watermarks - program the USR retraining watermarks
 * for clock states A and B.
 *
 * Each value is written only when rising or when @safe_to_lower permits a
 * decrease; the cached copy in hubbub2->watermarks mirrors what was
 * actually programmed.  @refclk_mhz is not used by this function.
 *
 * Return: true if at least one watermark still needs lowering once it
 * becomes safe to do so.
 */
bool hubbub401_program_usr_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	bool wm_pending = false;

	/* clock state A */
	if (safe_to_lower || watermarks->dcn4x.a.usr
			> hubbub2->watermarks.dcn4x.a.usr) {
		hubbub2->watermarks.dcn4x.a.usr = watermarks->dcn4x.a.usr;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, watermarks->dcn4x.a.usr);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.a.usr, watermarks->dcn4x.a.usr);
	} else if (watermarks->dcn4x.a.usr
			< hubbub2->watermarks.dcn4x.a.usr)
		wm_pending = true;

	/* clock state B */
	if (safe_to_lower || watermarks->dcn4x.b.usr
			> hubbub2->watermarks.dcn4x.b.usr) {
		hubbub2->watermarks.dcn4x.b.usr = watermarks->dcn4x.b.usr;
		REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0,
				DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, watermarks->dcn4x.b.usr);
		DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n"
			"HW register value = 0x%x\n\n",
			watermarks->dcn4x.b.usr, watermarks->dcn4x.b.usr);
	} else if (watermarks->dcn4x.b.usr
			< hubbub2->watermarks.dcn4x.b.usr)
		wm_pending = true;

	return wm_pending;
}
448
449
/*
 * hubbub401_program_watermarks - top-level watermark programming entry.
 *
 * Runs every watermark group (urgent, stutter, p-state, USR) and
 * accumulates whether any of them still has a lowering deferred until
 * it is safe.  Also (re)applies self-refresh and USR-retraining allow
 * controls based on the debug options.
 *
 * Return: true if any watermark programming is still pending.
 */
static bool hubbub401_program_watermarks(
	struct hubbub *hubbub,
	union dcn_watermark_set *watermarks,
	unsigned int refclk_mhz,
	bool safe_to_lower)
{
	bool wm_pending = false;

	/* All four groups are always programmed; '|=' keeps any pending flag. */
	wm_pending |= hubbub401_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
	wm_pending |= hubbub401_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);

	/*
	 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
	 * If the memory controller is fully utilized and the DCHub requestors are
	 * well ahead of their amortized schedule, then it is safe to prevent the next winner
	 * from being committed and sent to the fabric.
	 * The utilization of the memory controller is approximated by ensuring that
	 * the number of outstanding requests is greater than a threshold specified
	 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized
	 * schedule, the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
	 *
	 * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet,
	 * set maximum value (0x1FF) to turn off it for now.
	 */
	/*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
			DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
	REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
			DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
	*/

	hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);

	hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);

	return wm_pending;
}
495
496 /* Copy values from WM set A to all other sets */
/*
 * hubbub401_init_watermarks - seed every watermark register from set A.
 *
 * Reads each clock-state-A watermark register and copies its raw value
 * into the corresponding set-B register (and, for the stutter enter/exit
 * watermarks, into all WATERMARK1/2/3 variants of both sets) so that all
 * sets start out consistent before real values are calculated.
 */
static void hubbub401_init_watermarks(struct hubbub *hubbub)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	uint32_t reg;

	reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_MALL_A);
	REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_MALL_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A);
	REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B, reg);

	/* Stutter enter: fan set A's value out to every set/variant. */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B, reg);

	/* Stutter exit: same fan-out as stutter enter. */
	reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A, reg);
	REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A);
	REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B, reg);

	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg);
	reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A);
	REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B, reg);
}
551
/*
 * hubbub401_wm_read_state - read the currently-programmed watermarks back
 * from hardware into @wm.
 *
 * Zeroes @wm, then fills sets[0] from the clock-state-A registers and
 * sets[1] from the clock-state-B registers (urgency, SR enter/exit,
 * UCLK p-state, USR retraining, FCLK p-state).  Used for debug/state dump.
 */
static void hubbub401_wm_read_state(struct hubbub *hubbub,
		struct dcn_hubbub_wm *wm)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
	struct dcn_hubbub_wm_set *s;

	memset(wm, 0, sizeof(struct dcn_hubbub_wm));

	/* Watermark set A */
	s = &wm->sets[0];
	s->wm_set = 0;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change);

	/* Watermark set B */
	s = &wm->sets[1];
	s->wm_set = 1;
	REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
			DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);

	REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
			DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);

	REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change);

	REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B,
			DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain);

	REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B,
			DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change);
}
600
hubbub401_dcc_support_swizzle(enum swizzle_mode_addr3_values swizzle,unsigned int plane_pitch,unsigned int bytes_per_element,enum segment_order * segment_order_horz,enum segment_order * segment_order_vert)601 bool hubbub401_dcc_support_swizzle(
602 enum swizzle_mode_addr3_values swizzle,
603 unsigned int plane_pitch,
604 unsigned int bytes_per_element,
605 enum segment_order *segment_order_horz,
606 enum segment_order *segment_order_vert)
607 {
608 bool swizzle_supported = false;
609
610 switch (swizzle) {
611 case DC_ADDR3_SW_LINEAR:
612 if ((plane_pitch * bytes_per_element) % 256 == 0)
613 swizzle_supported = true;
614 break;
615 case DC_ADDR3_SW_64KB_2D:
616 case DC_ADDR3_SW_256KB_2D:
617 swizzle_supported = true;
618 break;
619 default:
620 swizzle_supported = false;
621 break;
622 }
623
624 if (swizzle_supported) {
625 if (bytes_per_element == 1) {
626 *segment_order_horz = segment_order__contiguous;
627 *segment_order_vert = segment_order__non_contiguous;
628 return true;
629 }
630 if (bytes_per_element == 2) {
631 *segment_order_horz = segment_order__non_contiguous;
632 *segment_order_vert = segment_order__contiguous;
633 return true;
634 }
635 if (bytes_per_element == 4) {
636 *segment_order_horz = segment_order__contiguous;
637 *segment_order_vert = segment_order__non_contiguous;
638 return true;
639 }
640 if (bytes_per_element == 8) {
641 *segment_order_horz = segment_order__contiguous;
642 *segment_order_vert = segment_order__non_contiguous;
643 return true;
644 }
645 }
646
647 return false;
648 }
649
hubbub401_dcc_support_pixel_format(enum surface_pixel_format format,unsigned int * plane0_bpe,unsigned int * plane1_bpe)650 bool hubbub401_dcc_support_pixel_format(
651 enum surface_pixel_format format,
652 unsigned int *plane0_bpe,
653 unsigned int *plane1_bpe)
654 {
655 switch (format) {
656 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
657 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
658 *plane0_bpe = 2;
659 *plane1_bpe = 0;
660 return true;
661 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
662 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
663 *plane0_bpe = 1;
664 *plane1_bpe = 2;
665 return true;
666 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
667 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
668 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
669 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
670 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
671 case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
672 case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
673 case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
674 case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
675 case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
676 *plane0_bpe = 4;
677 *plane1_bpe = 0;
678 return true;
679 case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
680 *plane0_bpe = 4;
681 *plane1_bpe = 1;
682 return true;
683 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
684 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
685 *plane0_bpe = 2;
686 *plane1_bpe = 4;
687 return true;
688 case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
689 case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
690 case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
691 *plane0_bpe = 4;
692 *plane1_bpe = 0;
693 return true;
694 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
695 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
696 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
697 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
698 *plane0_bpe = 8;
699 *plane1_bpe = 0;
700 return true;
701 default:
702 return false;
703 }
704 }
705
/*
 * hubbub401_get_blk256_size - dimensions (in elements) of a 256-byte block.
 *
 * Each entry satisfies width * height * bytes_per_element == 256.
 * For element sizes other than 1/2/4/8 bytes the outputs are left
 * untouched, matching the original behavior.
 */
void hubbub401_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
		unsigned int bytes_per_element)
{
	switch (bytes_per_element) {
	case 1:
		*blk256_width = 16;
		*blk256_height = 16;
		break;
	case 2:
		*blk256_width = 16;
		*blk256_height = 8;
		break;
	case 4:
		*blk256_width = 8;
		*blk256_height = 8;
		break;
	case 8:
		*blk256_width = 8;
		*blk256_height = 4;
		break;
	}
}
723
/*
 * hubbub401_det_request_size - choose full 256B or half 128B detile requests.
 *
 * A plane may use full 256B requests in a given scan direction only if two
 * full swaths of its data fit in the detile buffer; otherwise it must fall
 * back to half 128B requests.  For dual-plane formats (p1_bpe != 0) both
 * planes share the buffer, so their swath footprints are summed; if two full
 * swaths of both planes do not fit, a mixed mode is attempted where one plane
 * keeps 256B requests while the other drops to 128B, picked by the planes'
 * relative swath sizes.
 *
 * @detile_buf_size:  detile buffer size in bytes
 * @format:           surface pixel format; used only for the P010 packing
 *                    adjustment below
 * @p0_height/@p0_width/@p0_bpe:  plane 0 dimensions and bytes per element
 * @p1_height/@p1_width/@p1_bpe:  plane 1 dimensions and bytes per element
 *                    (p1_bpe == 0 means single-plane; p1 outputs untouched)
 * @p0_req128_horz_wc/@p0_req128_vert_wc/@p1_req128_horz_wc/@p1_req128_vert_wc:
 *                    out; true = use half 128B requests for that plane in
 *                    that scan direction, false = full 256B requests
 */
void hubbub401_det_request_size(
		unsigned int detile_buf_size,
		enum surface_pixel_format format,
		unsigned int p0_height,
		unsigned int p0_width,
		unsigned int p0_bpe,
		unsigned int p1_height,
		unsigned int p1_width,
		unsigned int p1_bpe,
		bool *p0_req128_horz_wc,
		bool *p0_req128_vert_wc,
		bool *p1_req128_horz_wc,
		bool *p1_req128_vert_wc)
{
	unsigned int blk256_height = 0;
	unsigned int blk256_width = 0;
	unsigned int p0_swath_bytes_horz_wc, p0_swath_bytes_vert_wc;
	unsigned int p1_swath_bytes_horz_wc, p1_swath_bytes_vert_wc;

	//For plane0
	hubbub401_get_blk256_size(&blk256_width, &blk256_height, p0_bpe);

	/* One swath = one row (horz) or column (vert) of 256B blocks spanning
	 * the full plane width/height.
	 */
	p0_swath_bytes_horz_wc = p0_width * blk256_height * p0_bpe;
	p0_swath_bytes_vert_wc = p0_height * blk256_width * p0_bpe;

	/* Two swaths (double buffering) must fit to allow 256B requests */
	*p0_req128_horz_wc = (2 * p0_swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	*p0_req128_vert_wc = (2 * p0_swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128b request */

	/*For dual planes needs to be considered together */
	if (p1_bpe) {
		hubbub401_get_blk256_size(&blk256_width, &blk256_height, p1_bpe);

		p1_swath_bytes_horz_wc = p1_width * blk256_height * p1_bpe;
		p1_swath_bytes_vert_wc = p1_height * blk256_width * p1_bpe;

		switch (format) {
		default:
			/* No any adjustment needed*/
			break;
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
		case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
			/* Packing at the ratio of 3:2 is supported before the detile buffer
			 * for YUV420 video with 10bpc (P010). Need to adjust for that.
			 */
			/* Scale swath bytes by 2/3, rounded up to a 256B multiple */
			p0_swath_bytes_horz_wc = (((p0_swath_bytes_horz_wc * 2) / 3 + 255) / 256) * 256;
			p0_swath_bytes_vert_wc = (((p0_swath_bytes_vert_wc * 2) / 3 + 255) / 256) * 256;
			p1_swath_bytes_horz_wc = (((p1_swath_bytes_horz_wc * 2) / 3 + 255) / 256) * 256;
			p1_swath_bytes_vert_wc = (((p1_swath_bytes_vert_wc * 2) / 3 + 255) / 256) * 256;
			break;
		}

		/* Recompute with both planes sharing the buffer; this overrides the
		 * single-plane result for p0 computed above.
		 */
		*p0_req128_horz_wc = *p1_req128_horz_wc = (2 * p0_swath_bytes_horz_wc +
			2 * p1_swath_bytes_horz_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

		*p0_req128_vert_wc = *p1_req128_vert_wc = (2 * p0_swath_bytes_vert_wc +
			2 * p1_swath_bytes_vert_wc <= detile_buf_size) ?
			false : /* full 256B request */
			true; /* half 128B request */

		/* If 128B requests are true, meaning 2 full swaths of data cannot fit
		 * in de-tile buffer, check if one plane can use 256B request while
		 * the other plane is using 128B requests
		 */
		if (*p0_req128_horz_wc) {
			// If ratio around 1:1 between p0 and p1 try to recalulate if p0 can use 256B
			if (p0_swath_bytes_horz_wc <= p1_swath_bytes_horz_wc + p1_swath_bytes_horz_wc / 2) {

				*p0_req128_horz_wc = (2 * p0_swath_bytes_horz_wc + p1_swath_bytes_horz_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */

			} else {
				/* ratio about 2:1 between p0 and p1, try to recalulate if p1 can use 256B */
				*p1_req128_horz_wc = (p0_swath_bytes_horz_wc + 2 * p1_swath_bytes_horz_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */
			}
		}

		if (*p0_req128_vert_wc) {
			// If ratio around 1:1 between p0 and p1 try to recalulate if p0 can use 256B
			if (p0_swath_bytes_vert_wc <= p1_swath_bytes_vert_wc + p1_swath_bytes_vert_wc / 2) {

				*p0_req128_vert_wc = (2 * p0_swath_bytes_vert_wc + p1_swath_bytes_vert_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */

			} else {
				/* ratio about 2:1 between p0 and p1, try to recalulate if p1 can use 256B */
				*p1_req128_vert_wc = (p0_swath_bytes_vert_wc + 2 * p1_swath_bytes_vert_wc <= detile_buf_size) ?
					false : /* full 256B request */
					true; /* half 128b request */
			}
		}
	}
}
hubbub401_get_dcc_compression_cap(struct hubbub * hubbub,const struct dc_dcc_surface_param * input,struct dc_surface_dcc_cap * output)827 bool hubbub401_get_dcc_compression_cap(struct hubbub *hubbub,
828 const struct dc_dcc_surface_param *input,
829 struct dc_surface_dcc_cap *output)
830 {
831 struct dc *dc = hubbub->ctx->dc;
832 const unsigned int max_dcc_plane_width = dc->caps.dcc_plane_width_limit;
833 /* DCN4_Programming_Guide_DCHUB.docx, Section 5.11.2.2 */
834 enum dcc_control dcc_control;
835 unsigned int plane0_bpe, plane1_bpe;
836 enum segment_order segment_order_horz, segment_order_vert;
837 enum segment_order p1_segment_order_horz, p1_segment_order_vert;
838 bool req128_horz_wc, req128_vert_wc;
839 unsigned int plane0_width = 0, plane0_height = 0, plane1_width = 0, plane1_height = 0;
840 bool p1_req128_horz_wc, p1_req128_vert_wc, is_dual_plane;
841
842 memset(output, 0, sizeof(*output));
843
844 if (dc->debug.disable_dcc == DCC_DISABLE)
845 return false;
846
847 /* Conservatively disable DCC for cases where ODM4:1 may be required. */
848 if (max_dcc_plane_width != 0 &&
849 (input->surface_size.width > max_dcc_plane_width || input->plane1_size.width > max_dcc_plane_width))
850 return false;
851
852 switch (input->format) {
853 default:
854 is_dual_plane = false;
855
856 plane1_width = 0;
857 plane1_height = 0;
858
859 if (input->surface_size.width > 6144 + 16)
860 plane0_width = 6160;
861 else
862 plane0_width = input->surface_size.width;
863
864 if (input->surface_size.height > 6144 + 16)
865 plane0_height = 6160;
866 else
867 plane0_height = input->surface_size.height;
868
869 break;
870 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
871 case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
872 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
873 case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
874 is_dual_plane = true;
875
876 if (input->surface_size.width > 7680 + 16)
877 plane0_width = 7696;
878 else
879 plane0_width = input->surface_size.width;
880
881 if (input->surface_size.height > 4320 + 16)
882 plane0_height = 4336;
883 else
884 plane0_height = input->surface_size.height;
885
886 if (input->plane1_size.width > 7680 + 16)
887 plane1_width = 7696 / 2;
888 else
889 plane1_width = input->plane1_size.width;
890
891 if (input->plane1_size.height > 4320 + 16)
892 plane1_height = 4336 / 2;
893 else
894 plane1_height = input->plane1_size.height;
895
896 break;
897
898 case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
899 is_dual_plane = true;
900
901 if (input->surface_size.width > 5120 + 16)
902 plane0_width = 5136;
903 else
904 plane0_width = input->surface_size.width;
905
906 if (input->surface_size.height > 5120 + 16)
907 plane0_height = 5136;
908 else
909 plane0_height = input->surface_size.height;
910
911 if (input->plane1_size.width > 5120 + 16)
912 plane1_width = 5136;
913 else
914 plane1_width = input->plane1_size.width;
915
916 if (input->plane1_size.height > 5120 + 16)
917 plane1_height = 5136;
918 else
919 plane1_height = input->plane1_size.height;
920
921 break;
922 }
923
924 if (!hubbub->funcs->dcc_support_pixel_format_plane0_plane1(input->format,
925 &plane0_bpe, &plane1_bpe))
926 return false;
927
928 /* Find plane0 DCC Controls */
929 if (!is_dual_plane) {
930
931 if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
932 input->plane0_pitch, plane0_bpe,
933 &segment_order_horz, &segment_order_vert))
934 return false;
935
936 hubbub401_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->format,
937 plane0_height, plane0_width, plane0_bpe,
938 plane1_height, plane1_width, plane1_bpe,
939 &req128_horz_wc, &req128_vert_wc, &p1_req128_horz_wc, &p1_req128_vert_wc);
940
941 if (!req128_horz_wc && !req128_vert_wc) {
942 dcc_control = dcc_control__256_256;
943 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
944 if (!req128_horz_wc)
945 dcc_control = dcc_control__256_256;
946 else if (segment_order_horz == segment_order__contiguous)
947 dcc_control = dcc_control__256_128;
948 else
949 dcc_control = dcc_control__256_64;
950 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
951 if (!req128_vert_wc)
952 dcc_control = dcc_control__256_256;
953 else if (segment_order_vert == segment_order__contiguous)
954 dcc_control = dcc_control__256_128;
955 else
956 dcc_control = dcc_control__256_64;
957 } else {
958 if ((req128_horz_wc &&
959 segment_order_horz == segment_order__non_contiguous) ||
960 (req128_vert_wc &&
961 segment_order_vert == segment_order__non_contiguous))
962 /* access_dir not known, must use most constraining */
963 dcc_control = dcc_control__256_64;
964 else
965 /* req128 is true for either horz and vert
966 * but segment_order is contiguous
967 */
968 dcc_control = dcc_control__256_128;
969 }
970
971 if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
972 dcc_control != dcc_control__256_256)
973 return false;
974
975 switch (dcc_control) {
976 case dcc_control__256_256:
977 output->grph.rgb.dcc_controls.dcc_256_256 = 1;
978 output->grph.rgb.dcc_controls.dcc_256_128 = 1;
979 output->grph.rgb.dcc_controls.dcc_256_64 = 1;
980 break;
981 case dcc_control__256_128:
982 output->grph.rgb.dcc_controls.dcc_256_128 = 1;
983 output->grph.rgb.dcc_controls.dcc_256_64 = 1;
984 break;
985 case dcc_control__256_64:
986 output->grph.rgb.dcc_controls.dcc_256_64 = 1;
987 break;
988 default:
989 /* Shouldn't get here */
990 ASSERT(0);
991 break;
992 }
993 } else {
994 /* For dual plane cases, need to examine both planes together */
995 if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
996 input->plane0_pitch, plane0_bpe,
997 &segment_order_horz, &segment_order_vert))
998 return false;
999
1000 if (!hubbub->funcs->dcc_support_swizzle_addr3(input->swizzle_mode_addr3,
1001 input->plane1_pitch, plane1_bpe,
1002 &p1_segment_order_horz, &p1_segment_order_vert))
1003 return false;
1004
1005 hubbub401_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, input->format,
1006 plane0_height, plane0_width, plane0_bpe,
1007 plane1_height, plane1_width, plane1_bpe,
1008 &req128_horz_wc, &req128_vert_wc, &p1_req128_horz_wc, &p1_req128_vert_wc);
1009
1010 /* Determine Plane 0 DCC Controls */
1011 if (!req128_horz_wc && !req128_vert_wc) {
1012 dcc_control = dcc_control__256_256;
1013 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1014 if (!req128_horz_wc)
1015 dcc_control = dcc_control__256_256;
1016 else if (segment_order_horz == segment_order__contiguous)
1017 dcc_control = dcc_control__256_128;
1018 else
1019 dcc_control = dcc_control__256_64;
1020 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1021 if (!req128_vert_wc)
1022 dcc_control = dcc_control__256_256;
1023 else if (segment_order_vert == segment_order__contiguous)
1024 dcc_control = dcc_control__256_128;
1025 else
1026 dcc_control = dcc_control__256_64;
1027 } else {
1028 if ((req128_horz_wc &&
1029 segment_order_horz == segment_order__non_contiguous) ||
1030 (req128_vert_wc &&
1031 segment_order_vert == segment_order__non_contiguous))
1032 /* access_dir not known, must use most constraining */
1033 dcc_control = dcc_control__256_64;
1034 else
1035 /* req128 is true for either horz and vert
1036 * but segment_order is contiguous
1037 */
1038 dcc_control = dcc_control__256_128;
1039 }
1040
1041 switch (dcc_control) {
1042 case dcc_control__256_256:
1043 output->video.luma.dcc_controls.dcc_256_256 = 1;
1044 output->video.luma.dcc_controls.dcc_256_128 = 1;
1045 output->video.luma.dcc_controls.dcc_256_64 = 1;
1046 break;
1047 case dcc_control__256_128:
1048 output->video.luma.dcc_controls.dcc_256_128 = 1;
1049 output->video.luma.dcc_controls.dcc_256_64 = 1;
1050 break;
1051 case dcc_control__256_64:
1052 output->video.luma.dcc_controls.dcc_256_64 = 1;
1053 break;
1054 default:
1055 ASSERT(0);
1056 break;
1057 }
1058
1059 /* Determine Plane 1 DCC Controls */
1060 if (!p1_req128_horz_wc && !p1_req128_vert_wc) {
1061 dcc_control = dcc_control__256_256;
1062 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1063 if (!p1_req128_horz_wc)
1064 dcc_control = dcc_control__256_256;
1065 else if (p1_segment_order_horz == segment_order__contiguous)
1066 dcc_control = dcc_control__256_128;
1067 else
1068 dcc_control = dcc_control__256_64;
1069 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1070 if (!p1_req128_vert_wc)
1071 dcc_control = dcc_control__256_256;
1072 else if (p1_segment_order_vert == segment_order__contiguous)
1073 dcc_control = dcc_control__256_128;
1074 else
1075 dcc_control = dcc_control__256_64;
1076 } else {
1077 if ((p1_req128_horz_wc &&
1078 p1_segment_order_horz == segment_order__non_contiguous) ||
1079 (p1_req128_vert_wc &&
1080 p1_segment_order_vert == segment_order__non_contiguous))
1081 /* access_dir not known, must use most constraining */
1082 dcc_control = dcc_control__256_64;
1083 else
1084 /* req128 is true for either horz and vert
1085 * but segment_order is contiguous
1086 */
1087 dcc_control = dcc_control__256_128;
1088 }
1089
1090 switch (dcc_control) {
1091 case dcc_control__256_256:
1092 output->video.chroma.dcc_controls.dcc_256_256 = 1;
1093 output->video.chroma.dcc_controls.dcc_256_128 = 1;
1094 output->video.chroma.dcc_controls.dcc_256_64 = 1;
1095 break;
1096 case dcc_control__256_128:
1097 output->video.chroma.dcc_controls.dcc_256_128 = 1;
1098 output->video.chroma.dcc_controls.dcc_256_64 = 1;
1099 break;
1100 case dcc_control__256_64:
1101 output->video.chroma.dcc_controls.dcc_256_64 = 1;
1102 break;
1103 default:
1104 ASSERT(0);
1105 break;
1106 }
1107 }
1108
1109 output->capable = true;
1110 return true;
1111 }
1112
/*
 * Program the DET (detile buffer) allocation, in CRB segments, for one HUBP
 * instance, and cache the programmed size in the hubbub struct.  After the
 * update, warn if the combined DET + compbuf allocation exceeds the CRB
 * capacity (transiently possible during ODM reconfiguration).
 */
static void dcn401_program_det_segments(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	switch (hubp_inst) {
	case 0:
		REG_UPDATE(DCHUBBUB_DET0_CTRL,
				DET0_SIZE, det_buffer_size_seg);
		hubbub2->det0_size = det_buffer_size_seg;
		break;
	case 1:
		REG_UPDATE(DCHUBBUB_DET1_CTRL,
				DET1_SIZE, det_buffer_size_seg);
		hubbub2->det1_size = det_buffer_size_seg;
		break;
	case 2:
		REG_UPDATE(DCHUBBUB_DET2_CTRL,
				DET2_SIZE, det_buffer_size_seg);
		hubbub2->det2_size = det_buffer_size_seg;
		break;
	case 3:
		REG_UPDATE(DCHUBBUB_DET3_CTRL,
				DET3_SIZE, det_buffer_size_seg);
		hubbub2->det3_size = det_buffer_size_seg;
		break;
	default:
		/* unknown hubp instance: nothing programmed */
		break;
	}
	/* Sanity-check the total CRB allocation against capacity */
	if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
			+ hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) {
		/* This may happen during seamless transition from ODM 2:1 to ODM4:1 */
		DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) >  CRB segments (%d)\n",
				hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size,
				hubbub2->compbuf_size_segments, hubbub2->crb_size_segs);
	}
}
1149
/*
 * Program the compressed buffer (compbuf) size, in CRB segments.
 *
 * Shrinking is always performed; growing only happens when
 * @safe_to_increase.  Before growing, poll until each DET's current size
 * matches its programmed size, so the compbuf is not enlarged into CRB
 * space a DET has not yet released.
 */
static void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	unsigned int cur_compbuf_size_seg = 0;

	if (safe_to_increase || compbuf_size_seg <= hubbub2->compbuf_size_segments) {
		if (compbuf_size_seg > hubbub2->compbuf_size_segments) {
			/* Growing: wait for all DETs to settle at their programmed sizes */
			REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100);
			REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100);
		}
		/* Should never be hit, if it is we have an erroneous hw config*/
		ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size
				+ hubbub2->det3_size + compbuf_size_seg <= hubbub2->crb_size_segs);
		REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_seg);
		hubbub2->compbuf_size_segments = compbuf_size_seg;

		/* Verify the read succeeded and no CRB config error is latched */
		ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &cur_compbuf_size_seg) && !cur_compbuf_size_seg);
	}
}
1172
/*
 * Block until the given HUBP instance's DET has reached its programmed size,
 * by polling DETn_SIZE_CURRENT against the cached value.  Unknown instances
 * are ignored.
 */
static void dcn401_wait_for_det_update(struct hubbub *hubbub, int hubp_inst)
{
	struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);

	switch (hubp_inst) {
	case 0:
		REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100000); /* 1 vupdate at 10hz */
		break;
	case 1:
		REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100000);
		break;
	case 2:
		REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100000);
		break;
	case 3:
		REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100000);
		break;
	default:
		/* unknown hubp instance: nothing to wait on */
		break;
	}
}
1194
/* DCN4.01 hubbub function table: reuses generic dcn1/2/3/32 helpers where
 * behavior is unchanged, with dcn401-specific DCC capability reporting,
 * CRB init, and DET/compbuf segment programming.
 */
static const struct hubbub_funcs hubbub4_01_funcs = {
	.update_dchub = hubbub2_update_dchub,
	.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
	.init_vm_ctx = hubbub2_init_vm_ctx,
	.dcc_support_swizzle_addr3 = hubbub401_dcc_support_swizzle,
	.dcc_support_pixel_format_plane0_plane1 = hubbub401_dcc_support_pixel_format,
	.get_dcc_compression_cap = hubbub401_get_dcc_compression_cap,
	.wm_read_state = hubbub401_wm_read_state,
	.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
	.program_watermarks = hubbub401_program_watermarks,
	.allow_self_refresh_control = hubbub1_allow_self_refresh_control,
	.is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled,
	.verify_allow_pstate_change_high = NULL,
	.force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes,
	.force_pstate_change_control = hubbub3_force_pstate_change_control,
	.init_watermarks = hubbub401_init_watermarks,
	.init_crb = dcn401_init_crb,
	.hubbub_read_state = hubbub2_read_state,
	.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
	.set_request_limit = hubbub32_set_request_limit,
	.program_det_segments = dcn401_program_det_segments,
	.program_compbuf_segments = dcn401_program_compbuf_segments,
	.wait_for_det_update = dcn401_wait_for_det_update,
};
1219
hubbub401_construct(struct dcn20_hubbub * hubbub2,struct dc_context * ctx,const struct dcn_hubbub_registers * hubbub_regs,const struct dcn_hubbub_shift * hubbub_shift,const struct dcn_hubbub_mask * hubbub_mask,int det_size_kb,int pixel_chunk_size_kb,int config_return_buffer_size_kb)1220 void hubbub401_construct(struct dcn20_hubbub *hubbub2,
1221 struct dc_context *ctx,
1222 const struct dcn_hubbub_registers *hubbub_regs,
1223 const struct dcn_hubbub_shift *hubbub_shift,
1224 const struct dcn_hubbub_mask *hubbub_mask,
1225 int det_size_kb,
1226 int pixel_chunk_size_kb,
1227 int config_return_buffer_size_kb)
1228 {
1229 hubbub2->base.ctx = ctx;
1230 hubbub2->base.funcs = &hubbub4_01_funcs;
1231 hubbub2->regs = hubbub_regs;
1232 hubbub2->shifts = hubbub_shift;
1233 hubbub2->masks = hubbub_mask;
1234
1235 hubbub2->detile_buf_size = det_size_kb * 1024;
1236 hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024;
1237 hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN4_01_CRB_SEGMENT_SIZE_KB;
1238 }
1239