1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2023 Intel Corporation
4 */
5
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "i9xx_wm.h"
9 #include "intel_atomic.h"
10 #include "intel_bo.h"
11 #include "intel_display.h"
12 #include "intel_display_trace.h"
13 #include "intel_fb.h"
14 #include "intel_mchbar_regs.h"
15 #include "intel_wm.h"
16 #include "skl_watermark.h"
17 #include "vlv_sideband.h"
18
19 struct intel_watermark_params {
20 u16 fifo_size;
21 u16 max_wm;
22 u8 default_wm;
23 u8 guard_size;
24 u8 cacheline_size;
25 };
26
27 /* used in computing the new watermarks state */
28 struct intel_wm_config {
29 unsigned int num_pipes_active;
30 bool sprites_enabled;
31 bool sprites_scaled;
32 };
33
34 struct cxsr_latency {
35 bool is_desktop : 1;
36 bool is_ddr3 : 1;
37 u16 fsb_freq;
38 u16 mem_freq;
39 u16 display_sr;
40 u16 display_hpll_disable;
41 u16 cursor_sr;
42 u16 cursor_hpll_disable;
43 };
44
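/*
 * The positional initializers below follow the field order of struct
 * cxsr_latency above: is_desktop, is_ddr3, fsb_freq, mem_freq,
 * display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable.
 * The frequencies are in MHz (pnv_get_cxsr_latency() divides the kHz
 * values from i915 by 1000 before comparing), and the four latency
 * columns are in nanoseconds, as pnv_update_wm() passes them straight
 * to intel_calculate_wm() as latency_ns.
 */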
45 static const struct cxsr_latency cxsr_latency_table[] = {
46 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
47 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
48 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
49 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
50 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
51
52 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
53 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
54 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
55 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
56 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
57
58 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
59 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
60 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
61 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
62 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
63
64 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
65 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
66 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
67 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
68 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
69
70 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
71 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
72 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
73 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
74 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
75
76 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
77 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
78 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
79 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
80 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
81 };
82
83 static const struct cxsr_latency *pnv_get_cxsr_latency(struct drm_i915_private *i915)
84 {
85 int i;
86
87 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
88 const struct cxsr_latency *latency = &cxsr_latency_table[i];
89 bool is_desktop = !IS_MOBILE(i915);
90
91 if (is_desktop == latency->is_desktop &&
92 i915->is_ddr3 == latency->is_ddr3 &&
93 DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq &&
94 DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq)
95 return latency;
96 }
97
98 drm_dbg_kms(&i915->drm,
99 "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
100 i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
101
102 return NULL;
103 }
104
105 static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
106 {
107 u32 val;
108
109 vlv_punit_get(dev_priv);
110
111 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
112 if (enable)
113 val &= ~FORCE_DDR_HIGH_FREQ;
114 else
115 val |= FORCE_DDR_HIGH_FREQ;
116 val &= ~FORCE_DDR_LOW_FREQ;
117 val |= FORCE_DDR_FREQ_REQ_ACK;
118 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
119
120 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
121 FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
122 drm_err(&dev_priv->drm,
123 "timed out waiting for Punit DDR DVFS request\n");
124
125 vlv_punit_put(dev_priv);
126 }
127
128 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
129 {
130 u32 val;
131
132 vlv_punit_get(dev_priv);
133
134 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
135 if (enable)
136 val |= DSP_MAXFIFO_PM5_ENABLE;
137 else
138 val &= ~DSP_MAXFIFO_PM5_ENABLE;
139 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
140
141 vlv_punit_put(dev_priv);
142 }
143
144 #define FW_WM(value, plane) \
145 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
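/*
 * For example, FW_WM(wm, SR) shifts the computed watermark into the
 * DSPFW_SR field position and masks off any bits that don't fit, so the
 * result can be OR'ed directly into a DSPFW register value.
 */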
146
147 static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
148 {
149 struct intel_display *display = &dev_priv->display;
150 bool was_enabled;
151 u32 val;
152
153 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
154 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
155 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
156 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV);
157 } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
158 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
160 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
161 } else if (IS_PINEVIEW(dev_priv)) {
162 val = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
163 was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
164 if (enable)
165 val |= PINEVIEW_SELF_REFRESH_EN;
166 else
167 val &= ~PINEVIEW_SELF_REFRESH_EN;
168 intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), val);
169 intel_uncore_posting_read(&dev_priv->uncore, DSPFW3(dev_priv));
170 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
171 was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
172 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
173 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
174 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val);
175 intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF);
176 } else if (IS_I915GM(dev_priv)) {
177 /*
178 * FIXME can't find a bit like this for 915G, and
179 * yet it does have the related watermark in
180 * FW_BLC_SELF. What's going on?
181 */
182 was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN;
183 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
184 _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
185 intel_uncore_write(&dev_priv->uncore, INSTPM, val);
186 intel_uncore_posting_read(&dev_priv->uncore, INSTPM);
187 } else {
188 return false;
189 }
190
191 trace_intel_memory_cxsr(display, was_enabled, enable);
192
193 drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n",
194 str_enabled_disabled(enable),
195 str_enabled_disabled(was_enabled));
196
197 return was_enabled;
198 }
199
200 /**
201 * intel_set_memory_cxsr - Configure CxSR state
202 * @dev_priv: i915 device
203 * @enable: Allow vs. disallow CxSR
204 *
205 * Allow or disallow the system to enter a special CxSR
206 * (C-state self refresh) state. What typically happens in CxSR mode
207 * is that several display FIFOs may get combined into a single larger
208 * FIFO for a particular plane (so called max FIFO mode) to allow the
209 * system to defer memory fetches longer, and the memory will enter
210 * self refresh.
211 *
212 * Note that enabling CxSR does not guarantee that the system enters
213 * this special mode, nor does it guarantee that the system stays
214 * in that mode once entered. So this just allows/disallows the system
215 * to autonomously utilize the CxSR mode. Other factors such as core
216 * C-states will affect when/if the system actually enters/exits the
217 * CxSR mode.
218 *
219 * Note that on VLV/CHV this actually only controls the max FIFO mode,
220 * and the system is free to enter/exit memory self refresh at any time
221 * even when the use of CxSR has been disallowed.
222 *
223 * While the system is actually in the CxSR/max FIFO mode, some plane
224 * control registers will not get latched on vblank. Thus in order to
225 * guarantee the system will respond to changes in the plane registers
226 * we must always disallow CxSR prior to making changes to those registers.
227 * Unfortunately the system will re-evaluate the CxSR conditions at
228 * frame start which happens after vblank start (which is when the plane
229 * registers would get latched), so we can't proceed with the plane update
230 * during the same frame where we disallowed CxSR.
231 *
232 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
233 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
234 * the hardware w.r.t. HPLL SR when writing to plane registers.
235 * Disallowing just CxSR is sufficient.
236 */
237 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
238 {
239 bool ret;
240
241 mutex_lock(&dev_priv->display.wm.wm_mutex);
242 ret = _intel_set_memory_cxsr(dev_priv, enable);
243 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
244 dev_priv->display.wm.vlv.cxsr = enable;
245 else if (IS_G4X(dev_priv))
246 dev_priv->display.wm.g4x.cxsr = enable;
247 mutex_unlock(&dev_priv->display.wm.wm_mutex);
248
249 return ret;
250 }
251
252 /*
253 * Latency for FIFO fetches is dependent on several factors:
254 * - memory configuration (speed, channels)
255 * - chipset
256 * - current MCH state
257 * It can be fairly high in some situations, so here we assume a fairly
258 * pessimal value. It's a tradeoff between extra memory fetches (if we
259 * set this value too high, the FIFO will fetch frequently to stay full)
260 * and power consumption (set it too low to save power and we might see
261 * FIFO underruns and display "flicker").
262 *
263 * A value of 5us seems to be a good balance; safe for very low end
264 * platforms but not overly aggressive on lower latency configs.
265 */
266 static const int pessimal_latency_ns = 5000;
267
268 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
269 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
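/*
 * VLV_FIFO_START() reassembles a 9-bit FIFO start offset (0..511): the
 * low eight bits come from DSPARB/DSPARB3 and the ninth bit from DSPARB2.
 * E.g. pipe A sprite0 uses VLV_FIFO_START(dsparb, dsparb2, 0, 0), i.e.
 * bits 7:0 of DSPARB plus bit 0 of DSPARB2, as read back by
 * vlv_get_fifo_size() below.
 */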
270
271 static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
272 {
273 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
275 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
276 enum pipe pipe = crtc->pipe;
277 int sprite0_start, sprite1_start;
278 u32 dsparb, dsparb2, dsparb3;
279
280 switch (pipe) {
281 case PIPE_A:
282 dsparb = intel_uncore_read(&dev_priv->uncore,
283 DSPARB(dev_priv));
284 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
285 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
286 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
287 break;
288 case PIPE_B:
289 dsparb = intel_uncore_read(&dev_priv->uncore,
290 DSPARB(dev_priv));
291 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
292 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
293 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
294 break;
295 case PIPE_C:
296 dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2);
297 dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3);
298 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
299 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
300 break;
301 default:
302 MISSING_CASE(pipe);
303 return;
304 }
305
306 fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
307 fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
308 fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
309 fifo_state->plane[PLANE_CURSOR] = 63;
310 }
311
312 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
313 enum i9xx_plane_id i9xx_plane)
314 {
315 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
316 int size;
317
318 size = dsparb & 0x7f;
319 if (i9xx_plane == PLANE_B)
320 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
321
322 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
323 dsparb, plane_name(i9xx_plane), size);
324
325 return size;
326 }
327
328 static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
329 enum i9xx_plane_id i9xx_plane)
330 {
331 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
332 int size;
333
334 size = dsparb & 0x1ff;
335 if (i9xx_plane == PLANE_B)
336 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
337 size >>= 1; /* Convert to cachelines */
338
339 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
340 dsparb, plane_name(i9xx_plane), size);
341
342 return size;
343 }
344
345 static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
346 enum i9xx_plane_id i9xx_plane)
347 {
348 u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB(dev_priv));
349 int size;
350
351 size = dsparb & 0x7f;
352 size >>= 2; /* Convert to cachelines */
353
354 drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n",
355 dsparb, plane_name(i9xx_plane), size);
356
357 return size;
358 }
359
360 /* Pineview has different values for various configs */
361 static const struct intel_watermark_params pnv_display_wm = {
362 .fifo_size = PINEVIEW_DISPLAY_FIFO,
363 .max_wm = PINEVIEW_MAX_WM,
364 .default_wm = PINEVIEW_DFT_WM,
365 .guard_size = PINEVIEW_GUARD_WM,
366 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
367 };
368
369 static const struct intel_watermark_params pnv_display_hplloff_wm = {
370 .fifo_size = PINEVIEW_DISPLAY_FIFO,
371 .max_wm = PINEVIEW_MAX_WM,
372 .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
373 .guard_size = PINEVIEW_GUARD_WM,
374 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
375 };
376
377 static const struct intel_watermark_params pnv_cursor_wm = {
378 .fifo_size = PINEVIEW_CURSOR_FIFO,
379 .max_wm = PINEVIEW_CURSOR_MAX_WM,
380 .default_wm = PINEVIEW_CURSOR_DFT_WM,
381 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
382 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
383 };
384
385 static const struct intel_watermark_params pnv_cursor_hplloff_wm = {
386 .fifo_size = PINEVIEW_CURSOR_FIFO,
387 .max_wm = PINEVIEW_CURSOR_MAX_WM,
388 .default_wm = PINEVIEW_CURSOR_DFT_WM,
389 .guard_size = PINEVIEW_CURSOR_GUARD_WM,
390 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
391 };
392
393 static const struct intel_watermark_params i965_cursor_wm_info = {
394 .fifo_size = I965_CURSOR_FIFO,
395 .max_wm = I965_CURSOR_MAX_WM,
396 .default_wm = I965_CURSOR_DFT_WM,
397 .guard_size = 2,
398 .cacheline_size = I915_FIFO_LINE_SIZE,
399 };
400
401 static const struct intel_watermark_params i945_wm_info = {
402 .fifo_size = I945_FIFO_SIZE,
403 .max_wm = I915_MAX_WM,
404 .default_wm = 1,
405 .guard_size = 2,
406 .cacheline_size = I915_FIFO_LINE_SIZE,
407 };
408
409 static const struct intel_watermark_params i915_wm_info = {
410 .fifo_size = I915_FIFO_SIZE,
411 .max_wm = I915_MAX_WM,
412 .default_wm = 1,
413 .guard_size = 2,
414 .cacheline_size = I915_FIFO_LINE_SIZE,
415 };
416
417 static const struct intel_watermark_params i830_a_wm_info = {
418 .fifo_size = I855GM_FIFO_SIZE,
419 .max_wm = I915_MAX_WM,
420 .default_wm = 1,
421 .guard_size = 2,
422 .cacheline_size = I830_FIFO_LINE_SIZE,
423 };
424
425 static const struct intel_watermark_params i830_bc_wm_info = {
426 .fifo_size = I855GM_FIFO_SIZE,
427 .max_wm = I915_MAX_WM / 2,
428 .default_wm = 1,
429 .guard_size = 2,
430 .cacheline_size = I830_FIFO_LINE_SIZE,
431 };
432
433 static const struct intel_watermark_params i845_wm_info = {
434 .fifo_size = I830_FIFO_SIZE,
435 .max_wm = I915_MAX_WM,
436 .default_wm = 1,
437 .guard_size = 2,
438 .cacheline_size = I830_FIFO_LINE_SIZE,
439 };
440
441 /**
442 * intel_wm_method1 - Method 1 / "small buffer" watermark formula
443 * @pixel_rate: Pipe pixel rate in kHz
444 * @cpp: Plane bytes per pixel
445 * @latency: Memory wakeup latency in 0.1us units
446 *
447 * Compute the watermark using the method 1 or "small buffer"
448 * formula. The caller may additionally add extra cachelines
449 * to account for TLB misses and clock crossings.
450 *
451 * This method is concerned with the short term drain rate
452 * of the FIFO, i.e. it does not account for blanking periods
453 * which would effectively reduce the average drain rate across
454 * a longer period. The name "small" refers to the fact the
455 * FIFO is relatively small compared to the amount of data
456 * fetched.
457 *
458 * The FIFO level vs. time graph might look something like:
459 *
460 * |\ |\
461 * | \ | \
462 * __---__---__ (- plane active, _ blanking)
463 * -> time
464 *
465 * or perhaps like this:
466 *
467 * |\|\ |\|\
468 * __----__----__ (- plane active, _ blanking)
469 * -> time
470 *
471 * Returns:
472 * The watermark in bytes
473 */
474 static unsigned int intel_wm_method1(unsigned int pixel_rate,
475 unsigned int cpp,
476 unsigned int latency)
477 {
478 u64 ret;
479
480 ret = mul_u32_u32(pixel_rate, cpp * latency);
481 ret = DIV_ROUND_UP_ULL(ret, 10000);
482
483 return ret;
484 }
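/*
 * Worked example (illustrative numbers only): a 148500 kHz pixel rate,
 * cpp = 4 and a 5 usec wakeup latency (latency = 50 in 0.1us units)
 * gives 148500 * 4 * 50 / 10000 = 2970 bytes, i.e. the amount of data
 * the FIFO must hold to ride out the memory wakeup.
 */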
485
486 /**
487 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
488 * @pixel_rate: Pipe pixel rate in kHz
489 * @htotal: Pipe horizontal total
490 * @width: Plane width in pixels
491 * @cpp: Plane bytes per pixel
492 * @latency: Memory wakeup latency in 0.1us units
493 *
494 * Compute the watermark using the method 2 or "large buffer"
495 * formula. The caller may additionally add extra cachelines
496 * to account for TLB misses and clock crossings.
497 *
498 * This method is concerned with the long term drain rate
499 * of the FIFO, i.e. it does account for blanking periods
500 * which effectively reduce the average drain rate across
501 * a longer period. The name "large" refers to the fact the
502 * FIFO is relatively large compared to the amount of data
503 * fetched.
504 *
505 * The FIFO level vs. time graph might look something like:
506 *
507 * |\___ |\___
508 * | \___ | \___
509 * | \ | \
510 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
511 * -> time
512 *
513 * Returns:
514 * The watermark in bytes
515 */
516 static unsigned int intel_wm_method2(unsigned int pixel_rate,
517 unsigned int htotal,
518 unsigned int width,
519 unsigned int cpp,
520 unsigned int latency)
521 {
522 unsigned int ret;
523
524 /*
525 * FIXME remove once all users are computing
526 * watermarks in the correct place.
527 */
528 if (WARN_ON_ONCE(htotal == 0))
529 htotal = 1;
530
531 ret = (latency * pixel_rate) / (htotal * 10000);
532 ret = (ret + 1) * width * cpp;
533
534 return ret;
535 }
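/*
 * Worked example (illustrative numbers only): with a 148500 kHz pixel
 * rate, htotal = 2200, a 1920 pixel wide plane, cpp = 4 and a 5 usec
 * latency (50 in 0.1us units), the latency spans
 * 50 * 148500 / (2200 * 10000) = 0 complete lines, so the method 2
 * watermark becomes (0 + 1) * 1920 * 4 = 7680 bytes, i.e. one full
 * line worth of plane data.
 */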
536
537 /**
538 * intel_calculate_wm - calculate watermark level
539 * @i915: the device
540 * @pixel_rate: pixel clock
541 * @wm: chip FIFO params
542 * @fifo_size: size of the FIFO buffer
543 * @cpp: bytes per pixel
544 * @latency_ns: memory latency for the platform
545 *
546 * Calculate the watermark level (the level at which the display plane will
547 * start fetching from memory again). Each chip has a different display
548 * FIFO size and allocation, so the caller needs to figure that out and pass
549 * in the correct intel_watermark_params structure.
550 *
551 * As the pixel clock runs, the FIFO will be drained at a rate that depends
552 * on the pixel size. When it reaches the watermark level, it'll start
553 * fetching FIFO-line-sized chunks from memory until the FIFO fills
554 * past the watermark point. If the FIFO drains completely, a FIFO underrun
555 * will occur, and a display engine hang could result.
556 */
557 static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
558 int pixel_rate,
559 const struct intel_watermark_params *wm,
560 int fifo_size, int cpp,
561 unsigned int latency_ns)
562 {
563 int entries, wm_size;
564
565 /*
566 * Note: we need to make sure we don't overflow for various clock &
567 * latency values.
568 * clocks go from a few thousand to several hundred thousand.
569 * latency is usually a few thousand
570 */
571 entries = intel_wm_method1(pixel_rate, cpp,
572 latency_ns / 100);
573 entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
574 wm->guard_size;
575 drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
576
577 wm_size = fifo_size - entries;
578 drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
579
580 /* Don't promote wm_size to unsigned... */
581 if (wm_size > wm->max_wm)
582 wm_size = wm->max_wm;
583 if (wm_size <= 0)
584 wm_size = wm->default_wm;
585
586 /*
587 * Bspec seems to indicate that the value shouldn't be lower than
588 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
589 * Let's go for 8 which is the burst size since certain platforms
590 * already use a hardcoded 8 (which is what the spec says should be
591 * done).
592 */
593 if (wm_size <= 8)
594 wm_size = 8;
595
596 return wm_size;
597 }
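/*
 * Worked example (hypothetical parameters, not taken from any real
 * platform): reusing the 2970 byte method 1 result from above with a
 * 64 byte cacheline size, a guard size of 2 and a 512 entry FIFO,
 * entries = DIV_ROUND_UP(2970, 64) + 2 = 49, so the watermark level
 * comes out as 512 - 49 = 463 before the max_wm/default_wm/minimum-of-8
 * clamping above is applied.
 */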
598
599 static bool is_disabling(int old, int new, int threshold)
600 {
601 return old >= threshold && new < threshold;
602 }
603
604 static bool is_enabling(int old, int new, int threshold)
605 {
606 return old < threshold && new >= threshold;
607 }
608
609 static bool intel_crtc_active(struct intel_crtc *crtc)
610 {
611 /* Be paranoid as we can arrive here with only partial
612 * state retrieved from the hardware during setup.
613 *
614 * We can ditch the adjusted_mode.crtc_clock check as soon
615 * as Haswell has gained clock readout/fastboot support.
616 *
617 * We can ditch the crtc->primary->state->fb check as soon as we can
618 * properly reconstruct framebuffers.
619 *
620 * FIXME: The intel_crtc->active here should be switched to
621 * crtc->state->active once we have proper CRTC states wired up
622 * for atomic.
623 */
624 return crtc->active && crtc->base.primary->state->fb &&
625 crtc->config->hw.adjusted_mode.crtc_clock;
626 }
627
628 static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
629 {
630 struct intel_crtc *crtc, *enabled = NULL;
631
632 for_each_intel_crtc(&dev_priv->drm, crtc) {
633 if (intel_crtc_active(crtc)) {
634 if (enabled)
635 return NULL;
636 enabled = crtc;
637 }
638 }
639
640 return enabled;
641 }
642
643 static void pnv_update_wm(struct drm_i915_private *dev_priv)
644 {
645 struct intel_crtc *crtc;
646 const struct cxsr_latency *latency;
647 u32 reg;
648 unsigned int wm;
649
650 latency = pnv_get_cxsr_latency(dev_priv);
651 if (!latency) {
652 drm_dbg_kms(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
653 intel_set_memory_cxsr(dev_priv, false);
654 return;
655 }
656
657 crtc = single_enabled_crtc(dev_priv);
658 if (crtc) {
659 const struct drm_framebuffer *fb =
660 crtc->base.primary->state->fb;
661 int pixel_rate = crtc->config->pixel_rate;
662 int cpp = fb->format->cpp[0];
663
664 /* Display SR */
665 wm = intel_calculate_wm(dev_priv, pixel_rate,
666 &pnv_display_wm,
667 pnv_display_wm.fifo_size,
668 cpp, latency->display_sr);
669 reg = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
670 reg &= ~DSPFW_SR_MASK;
671 reg |= FW_WM(wm, SR);
672 intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv), reg);
673 drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
674
675 /* cursor SR */
676 wm = intel_calculate_wm(dev_priv, pixel_rate,
677 &pnv_cursor_wm,
678 pnv_display_wm.fifo_size,
679 4, latency->cursor_sr);
680 intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
681 DSPFW_CURSOR_SR_MASK,
682 FW_WM(wm, CURSOR_SR));
683
684 /* Display HPLL off SR */
685 wm = intel_calculate_wm(dev_priv, pixel_rate,
686 &pnv_display_hplloff_wm,
687 pnv_display_hplloff_wm.fifo_size,
688 cpp, latency->display_hpll_disable);
689 intel_uncore_rmw(&dev_priv->uncore, DSPFW3(dev_priv),
690 DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
691
692 /* cursor HPLL off SR */
693 wm = intel_calculate_wm(dev_priv, pixel_rate,
694 &pnv_cursor_hplloff_wm,
695 pnv_display_hplloff_wm.fifo_size,
696 4, latency->cursor_hpll_disable);
697 reg = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
698 reg &= ~DSPFW_HPLL_CURSOR_MASK;
699 reg |= FW_WM(wm, HPLL_CURSOR);
700 intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv), reg);
701 drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);
702
703 intel_set_memory_cxsr(dev_priv, true);
704 } else {
705 intel_set_memory_cxsr(dev_priv, false);
706 }
707 }
708
709 static bool i9xx_wm_need_update(const struct intel_plane_state *old_plane_state,
710 const struct intel_plane_state *new_plane_state)
711 {
712 /* Update watermarks on tiling or size changes. */
713 if (old_plane_state->uapi.visible != new_plane_state->uapi.visible)
714 return true;
715
716 if (!old_plane_state->hw.fb || !new_plane_state->hw.fb)
717 return false;
718
719 if (old_plane_state->hw.fb->modifier != new_plane_state->hw.fb->modifier ||
720 old_plane_state->hw.rotation != new_plane_state->hw.rotation ||
721 drm_rect_width(&old_plane_state->uapi.src) != drm_rect_width(&new_plane_state->uapi.src) ||
722 drm_rect_height(&old_plane_state->uapi.src) != drm_rect_height(&new_plane_state->uapi.src) ||
723 drm_rect_width(&old_plane_state->uapi.dst) != drm_rect_width(&new_plane_state->uapi.dst) ||
724 drm_rect_height(&old_plane_state->uapi.dst) != drm_rect_height(&new_plane_state->uapi.dst))
725 return true;
726
727 return false;
728 }
729
730 static void i9xx_wm_compute(struct intel_crtc_state *new_crtc_state,
731 const struct intel_plane_state *old_plane_state,
732 const struct intel_plane_state *new_plane_state)
733 {
734 bool turn_off, turn_on, visible, was_visible, mode_changed;
735
736 mode_changed = intel_crtc_needs_modeset(new_crtc_state);
737 was_visible = old_plane_state->uapi.visible;
738 visible = new_plane_state->uapi.visible;
739
740 if (!was_visible && !visible)
741 return;
742
743 turn_off = was_visible && (!visible || mode_changed);
744 turn_on = visible && (!was_visible || mode_changed);
745
746 /* FIXME nuke when all wm code is atomic */
747 if (turn_on) {
748 new_crtc_state->update_wm_pre = true;
749 } else if (turn_off) {
750 new_crtc_state->update_wm_post = true;
751 } else if (i9xx_wm_need_update(old_plane_state, new_plane_state)) {
752 /* FIXME bollocks */
753 new_crtc_state->update_wm_pre = true;
754 new_crtc_state->update_wm_post = true;
755 }
756 }
757
758 static int i9xx_compute_watermarks(struct intel_atomic_state *state,
759 struct intel_crtc *crtc)
760 {
761 struct intel_crtc_state *new_crtc_state =
762 intel_atomic_get_new_crtc_state(state, crtc);
763 const struct intel_plane_state *old_plane_state;
764 const struct intel_plane_state *new_plane_state;
765 struct intel_plane *plane;
766 int i;
767
768 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
769 new_plane_state, i) {
770 if (plane->pipe != crtc->pipe)
771 continue;
772
773 i9xx_wm_compute(new_crtc_state, old_plane_state, new_plane_state);
774 }
775
776 return 0;
777 }
778
779 /*
780 * Documentation says:
781 * "If the line size is small, the TLB fetches can get in the way of the
782 * data fetches, causing some lag in the pixel data return which is not
783 * accounted for in the above formulas. The following adjustment only
784 * needs to be applied if eight whole lines fit in the buffer at once.
785 * The WM is adjusted upwards by the difference between the FIFO size
786 * and the size of 8 whole lines. This adjustment is always performed
787 * in the actual pixel depth regardless of whether FBC is enabled or not."
788 */
789 static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
790 {
791 int tlb_miss = fifo_size * 64 - width * cpp * 8;
792
793 return max(0, tlb_miss);
794 }
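/*
 * Worked example (illustrative numbers only): with a 127 cacheline FIFO
 * (127 * 64 = 8128 bytes) and a 100 pixel wide 4 bpp plane, eight whole
 * lines occupy 100 * 4 * 8 = 3200 bytes, so the watermark is bumped by
 * 8128 - 3200 = 4928 bytes. For a 1920 pixel wide plane eight lines no
 * longer fit in the FIFO and the adjustment is clamped to 0.
 */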
795
796 static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
797 const struct g4x_wm_values *wm)
798 {
799 struct intel_display *display = &dev_priv->display;
800 enum pipe pipe;
801
802 for_each_pipe(dev_priv, pipe)
803 trace_g4x_wm(intel_crtc_for_pipe(display, pipe), wm);
804
805 intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
806 FW_WM(wm->sr.plane, SR) |
807 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
808 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
809 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
810 intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
811 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
812 FW_WM(wm->sr.fbc, FBC_SR) |
813 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
814 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
815 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
816 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
817 intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
818 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
819 FW_WM(wm->sr.cursor, CURSOR_SR) |
820 FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
821 FW_WM(wm->hpll.plane, HPLL_SR));
822
823 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
824 }
825
826 #define FW_WM_VLV(value, plane) \
827 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
828
829 static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
830 const struct vlv_wm_values *wm)
831 {
832 struct intel_display *display = &dev_priv->display;
833 enum pipe pipe;
834
835 for_each_pipe(dev_priv, pipe) {
836 trace_vlv_wm(intel_crtc_for_pipe(display, pipe), wm);
837
838 intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
839 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
840 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
841 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
842 (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
843 }
844
845 /*
846 * Zero the (unused) WM1 watermarks, and also clear all the
847 * high order bits so that there are no out of bounds values
848 * present in the registers during the reprogramming.
849 */
850 intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
851 intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
852 intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
853 intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
854 intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);
855
856 intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
857 FW_WM(wm->sr.plane, SR) |
858 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
859 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
860 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
861 intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
862 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
863 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
864 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
865 intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
866 FW_WM(wm->sr.cursor, CURSOR_SR));
867
868 if (IS_CHERRYVIEW(dev_priv)) {
869 intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
870 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
871 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
872 intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
873 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
874 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
875 intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
876 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
877 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
878 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
879 FW_WM(wm->sr.plane >> 9, SR_HI) |
880 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
881 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
882 FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
883 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
884 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
885 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
886 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
887 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
888 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
889 } else {
890 intel_uncore_write(&dev_priv->uncore, DSPFW7,
891 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
892 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
893 intel_uncore_write(&dev_priv->uncore, DSPHOWM,
894 FW_WM(wm->sr.plane >> 9, SR_HI) |
895 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
896 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
897 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
898 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
899 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
900 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
901 }
902
903 intel_uncore_posting_read(&dev_priv->uncore, DSPFW1(dev_priv));
904 }
905
906 #undef FW_WM_VLV
907
908 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
909 {
910 /* all latencies in usec */
911 dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
912 dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
913 dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
914
915 dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
916 }
917
918 static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
919 {
920 /*
921 * DSPCNTR[13] supposedly controls whether the
922 * primary plane can use the FIFO space otherwise
923 * reserved for the sprite plane. It's not 100% clear
924 * what the actual FIFO size is, but it looks like we
925 * can happily set both primary and sprite watermarks
926 * up to 127 cachelines. So that would seem to mean
927 * that either DSPCNTR[13] doesn't do anything, or that
928 * the total FIFO is >= 256 cachelines in size. Either
929 * way, we don't seem to have to worry about this
930 * repartitioning as the maximum watermark value the
931 * register can hold for each plane is lower than the
932 * minimum FIFO size.
933 */
934 switch (plane_id) {
935 case PLANE_CURSOR:
936 return 63;
937 case PLANE_PRIMARY:
938 return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
939 case PLANE_SPRITE0:
940 return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
941 default:
942 MISSING_CASE(plane_id);
943 return 0;
944 }
945 }
946
947 static int g4x_fbc_fifo_size(int level)
948 {
949 switch (level) {
950 case G4X_WM_LEVEL_SR:
951 return 7;
952 case G4X_WM_LEVEL_HPLL:
953 return 15;
954 default:
955 MISSING_CASE(level);
956 return 0;
957 }
958 }
959
960 static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
961 const struct intel_plane_state *plane_state,
962 int level)
963 {
964 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
965 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
966 const struct drm_display_mode *pipe_mode =
967 &crtc_state->hw.pipe_mode;
968 unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
969 unsigned int pixel_rate, htotal, cpp, width, wm;
970
971 if (latency == 0)
972 return USHRT_MAX;
973
974 if (!intel_wm_plane_visible(crtc_state, plane_state))
975 return 0;
976
977 cpp = plane_state->hw.fb->format->cpp[0];
978
979 /*
980 * WaUse32BppForSRWM:ctg,elk
981 *
982 * The spec fails to list this restriction for the
983 * HPLL watermark, which seems a little strange.
984 * Let's use 32bpp for the HPLL watermark as well.
985 */
986 if (plane->id == PLANE_PRIMARY &&
987 level != G4X_WM_LEVEL_NORMAL)
988 cpp = max(cpp, 4u);
989
990 pixel_rate = crtc_state->pixel_rate;
991 htotal = pipe_mode->crtc_htotal;
992 width = drm_rect_width(&plane_state->uapi.src) >> 16;
993
994 if (plane->id == PLANE_CURSOR) {
995 wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
996 } else if (plane->id == PLANE_PRIMARY &&
997 level == G4X_WM_LEVEL_NORMAL) {
998 wm = intel_wm_method1(pixel_rate, cpp, latency);
999 } else {
1000 unsigned int small, large;
1001
1002 small = intel_wm_method1(pixel_rate, cpp, latency);
1003 large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
1004
1005 wm = min(small, large);
1006 }
1007
1008 wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
1009 width, cpp);
1010
1011 wm = DIV_ROUND_UP(wm, 64) + 2;
1012
1013 return min_t(unsigned int, wm, USHRT_MAX);
1014 }
1015
1016 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1017 int level, enum plane_id plane_id, u16 value)
1018 {
1019 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1020 bool dirty = false;
1021
1022 for (; level < dev_priv->display.wm.num_levels; level++) {
1023 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1024
1025 dirty |= raw->plane[plane_id] != value;
1026 raw->plane[plane_id] = value;
1027 }
1028
1029 return dirty;
1030 }
1031
1032 static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
1033 int level, u16 value)
1034 {
1035 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1036 bool dirty = false;
1037
1038 /* NORMAL level doesn't have an FBC watermark */
1039 level = max(level, G4X_WM_LEVEL_SR);
1040
1041 for (; level < dev_priv->display.wm.num_levels; level++) {
1042 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1043
1044 dirty |= raw->fbc != value;
1045 raw->fbc = value;
1046 }
1047
1048 return dirty;
1049 }
1050
1051 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
1052 const struct intel_plane_state *plane_state,
1053 u32 pri_val);
1054
1055 static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1056 const struct intel_plane_state *plane_state)
1057 {
1058 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1059 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1060 enum plane_id plane_id = plane->id;
1061 bool dirty = false;
1062 int level;
1063
1064 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1065 dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1066 if (plane_id == PLANE_PRIMARY)
1067 dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
1068 goto out;
1069 }
1070
1071 for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
1072 struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1073 int wm, max_wm;
1074
1075 wm = g4x_compute_wm(crtc_state, plane_state, level);
1076 max_wm = g4x_plane_fifo_size(plane_id, level);
1077
1078 if (wm > max_wm)
1079 break;
1080
1081 dirty |= raw->plane[plane_id] != wm;
1082 raw->plane[plane_id] = wm;
1083
1084 if (plane_id != PLANE_PRIMARY ||
1085 level == G4X_WM_LEVEL_NORMAL)
1086 continue;
1087
1088 wm = ilk_compute_fbc_wm(crtc_state, plane_state,
1089 raw->plane[plane_id]);
1090 max_wm = g4x_fbc_fifo_size(level);
1091
1092 /*
1093 * FBC wm is not mandatory as we
1094 * can always just disable its use.
1095 */
1096 if (wm > max_wm)
1097 wm = USHRT_MAX;
1098
1099 dirty |= raw->fbc != wm;
1100 raw->fbc = wm;
1101 }
1102
1103 /* mark watermarks as invalid */
1104 dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1105
1106 if (plane_id == PLANE_PRIMARY)
1107 dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
1108
1109 out:
1110 if (dirty) {
1111 drm_dbg_kms(&dev_priv->drm,
1112 "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
1113 plane->base.name,
1114 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
1115 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
1116 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);
1117
1118 if (plane_id == PLANE_PRIMARY)
1119 drm_dbg_kms(&dev_priv->drm,
1120 "FBC watermarks: SR=%d, HPLL=%d\n",
1121 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
1122 crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
1123 }
1124
1125 return dirty;
1126 }
1127
1128 static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1129 enum plane_id plane_id, int level)
1130 {
1131 const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
1132
1133 return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
1134 }
1135
1136 static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
1137 int level)
1138 {
1139 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1140
1141 if (level >= dev_priv->display.wm.num_levels)
1142 return false;
1143
1144 return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1145 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1146 g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1147 }
1148
1149 /* mark all levels starting from 'level' as invalid */
1150 static void g4x_invalidate_wms(struct intel_crtc *crtc,
1151 struct g4x_wm_state *wm_state, int level)
1152 {
1153 if (level <= G4X_WM_LEVEL_NORMAL) {
1154 enum plane_id plane_id;
1155
1156 for_each_plane_id_on_crtc(crtc, plane_id)
1157 wm_state->wm.plane[plane_id] = USHRT_MAX;
1158 }
1159
1160 if (level <= G4X_WM_LEVEL_SR) {
1161 wm_state->cxsr = false;
1162 wm_state->sr.cursor = USHRT_MAX;
1163 wm_state->sr.plane = USHRT_MAX;
1164 wm_state->sr.fbc = USHRT_MAX;
1165 }
1166
1167 if (level <= G4X_WM_LEVEL_HPLL) {
1168 wm_state->hpll_en = false;
1169 wm_state->hpll.cursor = USHRT_MAX;
1170 wm_state->hpll.plane = USHRT_MAX;
1171 wm_state->hpll.fbc = USHRT_MAX;
1172 }
1173 }
1174
1175 static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
1176 int level)
1177 {
1178 if (level < G4X_WM_LEVEL_SR)
1179 return false;
1180
1181 if (level >= G4X_WM_LEVEL_SR &&
1182 wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
1183 return false;
1184
1185 if (level >= G4X_WM_LEVEL_HPLL &&
1186 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
1187 return false;
1188
1189 return true;
1190 }
1191
1192 static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1193 {
1194 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1195 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
1196 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1197 const struct g4x_pipe_wm *raw;
1198 enum plane_id plane_id;
1199 int level;
1200
1201 level = G4X_WM_LEVEL_NORMAL;
1202 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1203 goto out;
1204
1205 raw = &crtc_state->wm.g4x.raw[level];
1206 for_each_plane_id_on_crtc(crtc, plane_id)
1207 wm_state->wm.plane[plane_id] = raw->plane[plane_id];
1208
1209 level = G4X_WM_LEVEL_SR;
1210 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1211 goto out;
1212
1213 raw = &crtc_state->wm.g4x.raw[level];
1214 wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
1215 wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
1216 wm_state->sr.fbc = raw->fbc;
1217
1218 wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
1219
1220 level = G4X_WM_LEVEL_HPLL;
1221 if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
1222 goto out;
1223
1224 raw = &crtc_state->wm.g4x.raw[level];
1225 wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
1226 wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
1227 wm_state->hpll.fbc = raw->fbc;
1228
1229 wm_state->hpll_en = wm_state->cxsr;
1230
1231 level++;
1232
1233 out:
1234 if (level == G4X_WM_LEVEL_NORMAL)
1235 return -EINVAL;
1236
1237 /* invalidate the higher levels */
1238 g4x_invalidate_wms(crtc, wm_state, level);
1239
1240 /*
1241 * Determine if the FBC watermark(s) can be used. If
1242 * this isn't the case we prefer to disable the FBC
1243 * watermark(s) rather than disable the SR/HPLL
1244 * level(s) entirely. 'level-1' is the highest valid
1245 * level here.
1246 */
1247 wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);
1248
1249 return 0;
1250 }
1251
1252 static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
1253 struct intel_crtc *crtc)
1254 {
1255 struct intel_crtc_state *crtc_state =
1256 intel_atomic_get_new_crtc_state(state, crtc);
1257 const struct intel_plane_state *old_plane_state;
1258 const struct intel_plane_state *new_plane_state;
1259 struct intel_plane *plane;
1260 unsigned int dirty = 0;
1261 int i;
1262
1263 for_each_oldnew_intel_plane_in_state(state, plane,
1264 old_plane_state,
1265 new_plane_state, i) {
1266 if (new_plane_state->hw.crtc != &crtc->base &&
1267 old_plane_state->hw.crtc != &crtc->base)
1268 continue;
1269
1270 if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
1271 dirty |= BIT(plane->id);
1272 }
1273
1274 if (!dirty)
1275 return 0;
1276
1277 return _g4x_compute_pipe_wm(crtc_state);
1278 }
1279
1280 static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
1281 struct intel_crtc *crtc)
1282 {
1283 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1284 struct intel_crtc_state *new_crtc_state =
1285 intel_atomic_get_new_crtc_state(state, crtc);
1286 const struct intel_crtc_state *old_crtc_state =
1287 intel_atomic_get_old_crtc_state(state, crtc);
1288 struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
1289 const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
1290 const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
1291 enum plane_id plane_id;
1292
1293 if (!new_crtc_state->hw.active ||
1294 intel_crtc_needs_modeset(new_crtc_state)) {
1295 *intermediate = *optimal;
1296
1297 intermediate->cxsr = false;
1298 intermediate->hpll_en = false;
1299 goto out;
1300 }
1301
1302 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1303 !new_crtc_state->disable_cxsr;
1304 intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
1305 !new_crtc_state->disable_cxsr;
1306 intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
1307
1308 for_each_plane_id_on_crtc(crtc, plane_id) {
1309 intermediate->wm.plane[plane_id] =
1310 max(optimal->wm.plane[plane_id],
1311 active->wm.plane[plane_id]);
1312
1313 drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
1314 g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
1315 }
1316
1317 intermediate->sr.plane = max(optimal->sr.plane,
1318 active->sr.plane);
1319 intermediate->sr.cursor = max(optimal->sr.cursor,
1320 active->sr.cursor);
1321 intermediate->sr.fbc = max(optimal->sr.fbc,
1322 active->sr.fbc);
1323
1324 intermediate->hpll.plane = max(optimal->hpll.plane,
1325 active->hpll.plane);
1326 intermediate->hpll.cursor = max(optimal->hpll.cursor,
1327 active->hpll.cursor);
1328 intermediate->hpll.fbc = max(optimal->hpll.fbc,
1329 active->hpll.fbc);
1330
1331 drm_WARN_ON(&dev_priv->drm,
1332 (intermediate->sr.plane >
1333 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
1334 intermediate->sr.cursor >
1335 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
1336 intermediate->cxsr);
1337 drm_WARN_ON(&dev_priv->drm,
1338 (intermediate->sr.plane >
1339 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
1340 intermediate->sr.cursor >
1341 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
1342 intermediate->hpll_en);
1343
1344 drm_WARN_ON(&dev_priv->drm,
1345 intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
1346 intermediate->fbc_en && intermediate->cxsr);
1347 drm_WARN_ON(&dev_priv->drm,
1348 intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
1349 intermediate->fbc_en && intermediate->hpll_en);
1350
1351 out:
1352 /*
1353 * If our intermediate WMs are identical to the final WMs, then we can
1354 * omit the post-vblank programming; only update if it's different.
1355 */
1356 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1357 new_crtc_state->wm.need_postvbl_update = true;
1358
1359 return 0;
1360 }
1361
1362 static int g4x_compute_watermarks(struct intel_atomic_state *state,
1363 struct intel_crtc *crtc)
1364 {
1365 int ret;
1366
1367 ret = g4x_compute_pipe_wm(state, crtc);
1368 if (ret)
1369 return ret;
1370
1371 ret = g4x_compute_intermediate_wm(state, crtc);
1372 if (ret)
1373 return ret;
1374
1375 return 0;
1376 }
1377
1378 static void g4x_merge_wm(struct drm_i915_private *dev_priv,
1379 struct g4x_wm_values *wm)
1380 {
1381 struct intel_crtc *crtc;
1382 int num_active_pipes = 0;
1383
1384 wm->cxsr = true;
1385 wm->hpll_en = true;
1386 wm->fbc_en = true;
1387
1388 for_each_intel_crtc(&dev_priv->drm, crtc) {
1389 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1390
1391 if (!crtc->active)
1392 continue;
1393
1394 if (!wm_state->cxsr)
1395 wm->cxsr = false;
1396 if (!wm_state->hpll_en)
1397 wm->hpll_en = false;
1398 if (!wm_state->fbc_en)
1399 wm->fbc_en = false;
1400
1401 num_active_pipes++;
1402 }
1403
1404 if (num_active_pipes != 1) {
1405 wm->cxsr = false;
1406 wm->hpll_en = false;
1407 wm->fbc_en = false;
1408 }
1409
1410 for_each_intel_crtc(&dev_priv->drm, crtc) {
1411 const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
1412 enum pipe pipe = crtc->pipe;
1413
1414 wm->pipe[pipe] = wm_state->wm;
1415 if (crtc->active && wm->cxsr)
1416 wm->sr = wm_state->sr;
1417 if (crtc->active && wm->hpll_en)
1418 wm->hpll = wm_state->hpll;
1419 }
1420 }
1421
1422 static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
1423 {
1424 struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
1425 struct g4x_wm_values new_wm = {};
1426
1427 g4x_merge_wm(dev_priv, &new_wm);
1428
1429 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
1430 return;
1431
1432 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
1433 _intel_set_memory_cxsr(dev_priv, false);
1434
1435 g4x_write_wm_values(dev_priv, &new_wm);
1436
1437 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
1438 _intel_set_memory_cxsr(dev_priv, true);
1439
1440 *old_wm = new_wm;
1441 }
1442
1443 static void g4x_initial_watermarks(struct intel_atomic_state *state,
1444 struct intel_crtc *crtc)
1445 {
1446 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1447 const struct intel_crtc_state *crtc_state =
1448 intel_atomic_get_new_crtc_state(state, crtc);
1449
1450 mutex_lock(&dev_priv->display.wm.wm_mutex);
1451 crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
1452 g4x_program_watermarks(dev_priv);
1453 mutex_unlock(&dev_priv->display.wm.wm_mutex);
1454 }
1455
1456 static void g4x_optimize_watermarks(struct intel_atomic_state *state,
1457 struct intel_crtc *crtc)
1458 {
1459 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1460 const struct intel_crtc_state *crtc_state =
1461 intel_atomic_get_new_crtc_state(state, crtc);
1462
1463 if (!crtc_state->wm.need_postvbl_update)
1464 return;
1465
1466 mutex_lock(&dev_priv->display.wm.wm_mutex);
1467 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
1468 g4x_program_watermarks(dev_priv);
1469 mutex_unlock(&dev_priv->display.wm.wm_mutex);
1470 }
1471
1472 /* latency must be in 0.1us units. */
1473 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
1474 unsigned int htotal,
1475 unsigned int width,
1476 unsigned int cpp,
1477 unsigned int latency)
1478 {
1479 unsigned int ret;
1480
1481 ret = intel_wm_method2(pixel_rate, htotal,
1482 width, cpp, latency);
1483 ret = DIV_ROUND_UP(ret, 64);
1484
1485 return ret;
1486 }
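/*
 * Continuing the method 2 example above: a 7680 byte watermark becomes
 * DIV_ROUND_UP(7680, 64) = 120, i.e. the value is converted from bytes
 * into the 64 byte (cacheline) units used by the VLV/CHV watermark
 * fields.
 */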
1487
1488 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
1489 {
1490 /* all latencies in usec */
1491 dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
1492
1493 dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
1494
1495 if (IS_CHERRYVIEW(dev_priv)) {
1496 dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
1497 dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
1498
1499 dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
1500 }
1501 }
1502
1503 static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
1504 const struct intel_plane_state *plane_state,
1505 int level)
1506 {
1507 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1508 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1509 const struct drm_display_mode *pipe_mode =
1510 &crtc_state->hw.pipe_mode;
1511 unsigned int pixel_rate, htotal, cpp, width, wm;
1512
1513 if (dev_priv->display.wm.pri_latency[level] == 0)
1514 return USHRT_MAX;
1515
1516 if (!intel_wm_plane_visible(crtc_state, plane_state))
1517 return 0;
1518
1519 cpp = plane_state->hw.fb->format->cpp[0];
1520 pixel_rate = crtc_state->pixel_rate;
1521 htotal = pipe_mode->crtc_htotal;
1522 width = drm_rect_width(&plane_state->uapi.src) >> 16;
1523
1524 if (plane->id == PLANE_CURSOR) {
1525 /*
1526 * FIXME the formula gives values that are
1527 * too big for the cursor FIFO, and hence we
1528 * would never be able to use cursors. For
1529 * now just hardcode the watermark.
1530 */
1531 wm = 63;
1532 } else {
1533 wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
1534 dev_priv->display.wm.pri_latency[level] * 10);
1535 }
1536
1537 return min_t(unsigned int, wm, USHRT_MAX);
1538 }
1539
1540 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
1541 {
1542 return (active_planes & (BIT(PLANE_SPRITE0) |
1543 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
1544 }
1545
1546 static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
1547 {
1548 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1550 const struct g4x_pipe_wm *raw =
1551 &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
1552 struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
1553 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1554 int num_active_planes = hweight8(active_planes);
1555 const int fifo_size = 511;
1556 int fifo_extra, fifo_left = fifo_size;
1557 int sprite0_fifo_extra = 0;
1558 unsigned int total_rate;
1559 enum plane_id plane_id;
1560
1561 /*
1562 * When enabling sprite0 after sprite1 has already been enabled
1563 * we tend to get an underrun unless sprite0 already has some
1564 * FIFO space allocated. Hence we always allocate at least one
1565 * cacheline for sprite0 whenever sprite1 is enabled.
1566 *
1567 * All other plane enable sequences appear immune to this problem.
1568 */
1569 if (vlv_need_sprite0_fifo_workaround(active_planes))
1570 sprite0_fifo_extra = 1;
1571
1572 total_rate = raw->plane[PLANE_PRIMARY] +
1573 raw->plane[PLANE_SPRITE0] +
1574 raw->plane[PLANE_SPRITE1] +
1575 sprite0_fifo_extra;
1576
1577 if (total_rate > fifo_size)
1578 return -EINVAL;
1579
1580 if (total_rate == 0)
1581 total_rate = 1;
1582
1583 for_each_plane_id_on_crtc(crtc, plane_id) {
1584 unsigned int rate;
1585
1586 if ((active_planes & BIT(plane_id)) == 0) {
1587 fifo_state->plane[plane_id] = 0;
1588 continue;
1589 }
1590
1591 rate = raw->plane[plane_id];
1592 fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
1593 fifo_left -= fifo_state->plane[plane_id];
1594 }
1595
1596 fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
1597 fifo_left -= sprite0_fifo_extra;
1598
1599 fifo_state->plane[PLANE_CURSOR] = 63;
1600
1601 fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
1602
1603 /* spread the remainder evenly */
1604 for_each_plane_id_on_crtc(crtc, plane_id) {
1605 int plane_extra;
1606
1607 if (fifo_left == 0)
1608 break;
1609
1610 if ((active_planes & BIT(plane_id)) == 0)
1611 continue;
1612
1613 plane_extra = min(fifo_extra, fifo_left);
1614 fifo_state->plane[plane_id] += plane_extra;
1615 fifo_left -= plane_extra;
1616 }
1617
1618 drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);
1619
1620 /* give it all to the first plane if none are active */
1621 if (active_planes == 0) {
1622 drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
1623 fifo_state->plane[PLANE_PRIMARY] = fifo_left;
1624 }
1625
1626 return 0;
1627 }
1628
1629 /* mark all levels starting from 'level' as invalid */
1630 static void vlv_invalidate_wms(struct intel_crtc *crtc,
1631 struct vlv_wm_state *wm_state, int level)
1632 {
1633 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1634
1635 for (; level < dev_priv->display.wm.num_levels; level++) {
1636 enum plane_id plane_id;
1637
1638 for_each_plane_id_on_crtc(crtc, plane_id)
1639 wm_state->wm[level].plane[plane_id] = USHRT_MAX;
1640
1641 wm_state->sr[level].cursor = USHRT_MAX;
1642 wm_state->sr[level].plane = USHRT_MAX;
1643 }
1644 }
1645
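/*
 * Convert a watermark from "FIFO entries used" to the inverted form
 * programmed into the registers: fifo_size - wm, or USHRT_MAX (invalid)
 * if the value does not fit in the FIFO.
 */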
1646 static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
1647 {
1648 if (wm > fifo_size)
1649 return USHRT_MAX;
1650 else
1651 return fifo_size - wm;
1652 }
1653
1654 /*
1655 * Starting from 'level' set all higher
1656 * levels to 'value' in the "raw" watermarks.
1657 */
1658 static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
1659 int level, enum plane_id plane_id, u16 value)
1660 {
1661 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1662 bool dirty = false;
1663
1664 for (; level < dev_priv->display.wm.num_levels; level++) {
1665 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1666
1667 dirty |= raw->plane[plane_id] != value;
1668 raw->plane[plane_id] = value;
1669 }
1670
1671 return dirty;
1672 }
1673
1674 static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
1675 const struct intel_plane_state *plane_state)
1676 {
1677 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1678 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1679 enum plane_id plane_id = plane->id;
1680 int level;
1681 bool dirty = false;
1682
1683 if (!intel_wm_plane_visible(crtc_state, plane_state)) {
1684 dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
1685 goto out;
1686 }
1687
1688 for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
1689 struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1690 int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
1691 int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
1692
1693 if (wm > max_wm)
1694 break;
1695
1696 dirty |= raw->plane[plane_id] != wm;
1697 raw->plane[plane_id] = wm;
1698 }
1699
1700 /* mark all higher levels as invalid */
1701 dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
1702
1703 out:
1704 if (dirty)
1705 drm_dbg_kms(&dev_priv->drm,
1706 "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
1707 plane->base.name,
1708 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
1709 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
1710 crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
1711
1712 return dirty;
1713 }
1714
1715 static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
1716 enum plane_id plane_id, int level)
1717 {
1718 const struct g4x_pipe_wm *raw =
1719 &crtc_state->wm.vlv.raw[level];
1720 const struct vlv_fifo_state *fifo_state =
1721 &crtc_state->wm.vlv.fifo_state;
1722
1723 return raw->plane[plane_id] <= fifo_state->plane[plane_id];
1724 }
1725
1726 static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
1727 {
1728 return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
1729 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
1730 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
1731 vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
1732 }
1733
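/*
 * Build the optimal watermark state for one pipe: invert the raw
 * per-plane watermarks against the computed FIFO split, fill in the
 * maxfifo (SR) watermarks, and trim the number of levels to those
 * that actually fit.
 */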
1734 static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
1735 {
1736 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1737 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1738 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
1739 const struct vlv_fifo_state *fifo_state =
1740 &crtc_state->wm.vlv.fifo_state;
1741 u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
1742 int num_active_planes = hweight8(active_planes);
1743 enum plane_id plane_id;
1744 int level;
1745
1746 /* initially allow all levels */
1747 wm_state->num_levels = dev_priv->display.wm.num_levels;
1748 /*
1749 * Note that enabling cxsr with no primary/sprite planes
1750 * enabled can wedge the pipe. Hence we only allow cxsr
1751 * with exactly one enabled primary/sprite plane.
1752 */
1753 wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
1754
1755 for (level = 0; level < wm_state->num_levels; level++) {
1756 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
1757 const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;
1758
1759 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
1760 break;
1761
1762 for_each_plane_id_on_crtc(crtc, plane_id) {
1763 wm_state->wm[level].plane[plane_id] =
1764 vlv_invert_wm_value(raw->plane[plane_id],
1765 fifo_state->plane[plane_id]);
1766 }
1767
1768 wm_state->sr[level].plane =
1769 vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
1770 raw->plane[PLANE_SPRITE0],
1771 raw->plane[PLANE_SPRITE1]),
1772 sr_fifo_size);
1773
1774 wm_state->sr[level].cursor =
1775 vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
1776 63);
1777 }
1778
1779 if (level == 0)
1780 return -EINVAL;
1781
1782 /* limit to only levels we can actually handle */
1783 wm_state->num_levels = level;
1784
1785 /* invalidate the higher levels */
1786 vlv_invalidate_wms(crtc, wm_state, level);
1787
1788 return 0;
1789 }
1790
1791 static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
1792 struct intel_crtc *crtc)
1793 {
1794 struct intel_crtc_state *crtc_state =
1795 intel_atomic_get_new_crtc_state(state, crtc);
1796 const struct intel_plane_state *old_plane_state;
1797 const struct intel_plane_state *new_plane_state;
1798 struct intel_plane *plane;
1799 unsigned int dirty = 0;
1800 int i;
1801
1802 for_each_oldnew_intel_plane_in_state(state, plane,
1803 old_plane_state,
1804 new_plane_state, i) {
1805 if (new_plane_state->hw.crtc != &crtc->base &&
1806 old_plane_state->hw.crtc != &crtc->base)
1807 continue;
1808
1809 if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
1810 dirty |= BIT(plane->id);
1811 }
1812
1813 /*
1814 * DSPARB registers may have been reset due to the
1815 * power well being turned off. Make sure we restore
1816 * them to a consistent state even if no primary/sprite
1817 * planes are initially active. We also force a FIFO
1818 * recomputation so that we are sure to sanitize the
1819 * FIFO setting we took over from the BIOS even if there
1820 * are no active planes on the crtc.
1821 */
1822 if (intel_crtc_needs_modeset(crtc_state))
1823 dirty = ~0;
1824
1825 if (!dirty)
1826 return 0;
1827
1828 /* cursor changes don't warrant a FIFO recompute */
1829 if (dirty & ~BIT(PLANE_CURSOR)) {
1830 const struct intel_crtc_state *old_crtc_state =
1831 intel_atomic_get_old_crtc_state(state, crtc);
1832 const struct vlv_fifo_state *old_fifo_state =
1833 &old_crtc_state->wm.vlv.fifo_state;
1834 const struct vlv_fifo_state *new_fifo_state =
1835 &crtc_state->wm.vlv.fifo_state;
1836 int ret;
1837
1838 ret = vlv_compute_fifo(crtc_state);
1839 if (ret)
1840 return ret;
1841
1842 if (intel_crtc_needs_modeset(crtc_state) ||
1843 memcmp(old_fifo_state, new_fifo_state,
1844 sizeof(*new_fifo_state)) != 0)
1845 crtc_state->fifo_changed = true;
1846 }
1847
1848 return _vlv_compute_pipe_wm(crtc_state);
1849 }
1850
1851 #define VLV_FIFO(plane, value) \
1852 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
1853
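/*
 * Write the new DSPARB FIFO split for this pipe. Only touches the
 * registers when the split actually changed (crtc_state->fifo_changed).
 */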
1854 static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
1855 struct intel_crtc *crtc)
1856 {
1857 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1858 struct intel_uncore *uncore = &dev_priv->uncore;
1859 const struct intel_crtc_state *crtc_state =
1860 intel_atomic_get_new_crtc_state(state, crtc);
1861 const struct vlv_fifo_state *fifo_state =
1862 &crtc_state->wm.vlv.fifo_state;
1863 int sprite0_start, sprite1_start, fifo_size;
1864 u32 dsparb, dsparb2, dsparb3;
1865
1866 if (!crtc_state->fifo_changed)
1867 return;
1868
1869 sprite0_start = fifo_state->plane[PLANE_PRIMARY];
1870 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
1871 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
1872
1873 drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
1874 drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
1875
1876 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
1877
1878 /*
1879 * uncore.lock serves a double purpose here. It allows us to
1880 * use the less expensive I915_{READ,WRITE}_FW() functions, and
1881 * it protects the DSPARB registers from getting clobbered by
1882 * parallel updates from multiple pipes.
1883 *
1884 * intel_pipe_update_start() has already disabled interrupts
1885 * for us, so a plain spin_lock() is sufficient here.
1886 */
1887 spin_lock(&uncore->lock);
1888
1889 switch (crtc->pipe) {
1890 case PIPE_A:
1891 dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
1892 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1893
1894 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
1895 VLV_FIFO(SPRITEB, 0xff));
1896 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
1897 VLV_FIFO(SPRITEB, sprite1_start));
1898
1899 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
1900 VLV_FIFO(SPRITEB_HI, 0x1));
1901 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
1902 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
1903
1904 intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
1905 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1906 break;
1907 case PIPE_B:
1908 dsparb = intel_uncore_read_fw(uncore, DSPARB(dev_priv));
1909 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1910
1911 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
1912 VLV_FIFO(SPRITED, 0xff));
1913 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
1914 VLV_FIFO(SPRITED, sprite1_start));
1915
1916 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
1917 VLV_FIFO(SPRITED_HI, 0xff));
1918 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
1919 VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
1920
1921 intel_uncore_write_fw(uncore, DSPARB(dev_priv), dsparb);
1922 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1923 break;
1924 case PIPE_C:
1925 dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
1926 dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
1927
1928 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
1929 VLV_FIFO(SPRITEF, 0xff));
1930 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
1931 VLV_FIFO(SPRITEF, sprite1_start));
1932
1933 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
1934 VLV_FIFO(SPRITEF_HI, 0xff));
1935 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
1936 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
1937
1938 intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
1939 intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
1940 break;
1941 default:
1942 break;
1943 }
1944
1945 intel_uncore_posting_read_fw(uncore, DSPARB(dev_priv));
1946
1947 spin_unlock(&uncore->lock);
1948 }
1949
1950 #undef VLV_FIFO
1951
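/*
 * Build an intermediate watermark state that is safe both before and
 * after the vblank: take the minimum of the old (active) and new
 * (optimal) values for every level, and only allow cxsr if both
 * states allow it.
 */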
1952 static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
1953 struct intel_crtc *crtc)
1954 {
1955 struct intel_crtc_state *new_crtc_state =
1956 intel_atomic_get_new_crtc_state(state, crtc);
1957 const struct intel_crtc_state *old_crtc_state =
1958 intel_atomic_get_old_crtc_state(state, crtc);
1959 struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
1960 const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
1961 const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
1962 int level;
1963
1964 if (!new_crtc_state->hw.active ||
1965 intel_crtc_needs_modeset(new_crtc_state)) {
1966 *intermediate = *optimal;
1967
1968 intermediate->cxsr = false;
1969 goto out;
1970 }
1971
1972 intermediate->num_levels = min(optimal->num_levels, active->num_levels);
1973 intermediate->cxsr = optimal->cxsr && active->cxsr &&
1974 !new_crtc_state->disable_cxsr;
1975
1976 for (level = 0; level < intermediate->num_levels; level++) {
1977 enum plane_id plane_id;
1978
1979 for_each_plane_id_on_crtc(crtc, plane_id) {
1980 intermediate->wm[level].plane[plane_id] =
1981 min(optimal->wm[level].plane[plane_id],
1982 active->wm[level].plane[plane_id]);
1983 }
1984
1985 intermediate->sr[level].plane = min(optimal->sr[level].plane,
1986 active->sr[level].plane);
1987 intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
1988 active->sr[level].cursor);
1989 }
1990
1991 vlv_invalidate_wms(crtc, intermediate, level);
1992
1993 out:
1994 /*
1995 * If our intermediate WM are identical to the final WM, then we can
1996 * omit the post-vblank programming; only update if it's different.
1997 */
1998 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
1999 new_crtc_state->wm.need_postvbl_update = true;
2000
2001 return 0;
2002 }
2003
2004 static int vlv_compute_watermarks(struct intel_atomic_state *state,
2005 struct intel_crtc *crtc)
2006 {
2007 int ret;
2008
2009 ret = vlv_compute_pipe_wm(state, crtc);
2010 if (ret)
2011 return ret;
2012
2013 ret = vlv_compute_intermediate_wm(state, crtc);
2014 if (ret)
2015 return ret;
2016
2017 return 0;
2018 }
2019
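/*
 * Merge the active per-crtc watermark states into one device-wide
 * vlv_wm_values: use the deepest level supported by all active pipes,
 * and only allow cxsr with a single active pipe.
 */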
2020 static void vlv_merge_wm(struct drm_i915_private *dev_priv,
2021 struct vlv_wm_values *wm)
2022 {
2023 struct intel_crtc *crtc;
2024 int num_active_pipes = 0;
2025
2026 wm->level = dev_priv->display.wm.num_levels - 1;
2027 wm->cxsr = true;
2028
2029 for_each_intel_crtc(&dev_priv->drm, crtc) {
2030 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2031
2032 if (!crtc->active)
2033 continue;
2034
2035 if (!wm_state->cxsr)
2036 wm->cxsr = false;
2037
2038 num_active_pipes++;
2039 wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
2040 }
2041
2042 if (num_active_pipes != 1)
2043 wm->cxsr = false;
2044
2045 if (num_active_pipes > 1)
2046 wm->level = VLV_WM_LEVEL_PM2;
2047
2048 for_each_intel_crtc(&dev_priv->drm, crtc) {
2049 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
2050 enum pipe pipe = crtc->pipe;
2051
2052 wm->pipe[pipe] = wm_state->wm[wm->level];
2053 if (crtc->active && wm->cxsr)
2054 wm->sr = wm_state->sr[wm->level];
2055
2056 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
2057 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
2058 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
2059 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
2060 }
2061 }
2062
2063 static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
2064 {
2065 struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
2066 struct vlv_wm_values new_wm = {};
2067
2068 vlv_merge_wm(dev_priv, &new_wm);
2069
2070 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
2071 return;
2072
2073 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2074 chv_set_memory_dvfs(dev_priv, false);
2075
2076 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2077 chv_set_memory_pm5(dev_priv, false);
2078
2079 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
2080 _intel_set_memory_cxsr(dev_priv, false);
2081
2082 vlv_write_wm_values(dev_priv, &new_wm);
2083
2084 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
2085 _intel_set_memory_cxsr(dev_priv, true);
2086
2087 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
2088 chv_set_memory_pm5(dev_priv, true);
2089
2090 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
2091 chv_set_memory_dvfs(dev_priv, true);
2092
2093 *old_wm = new_wm;
2094 }
2095
2096 static void vlv_initial_watermarks(struct intel_atomic_state *state,
2097 struct intel_crtc *crtc)
2098 {
2099 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2100 const struct intel_crtc_state *crtc_state =
2101 intel_atomic_get_new_crtc_state(state, crtc);
2102
2103 mutex_lock(&dev_priv->display.wm.wm_mutex);
2104 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
2105 vlv_program_watermarks(dev_priv);
2106 mutex_unlock(&dev_priv->display.wm.wm_mutex);
2107 }
2108
2109 static void vlv_optimize_watermarks(struct intel_atomic_state *state,
2110 struct intel_crtc *crtc)
2111 {
2112 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2113 const struct intel_crtc_state *crtc_state =
2114 intel_atomic_get_new_crtc_state(state, crtc);
2115
2116 if (!crtc_state->wm.need_postvbl_update)
2117 return;
2118
2119 mutex_lock(&dev_priv->display.wm.wm_mutex);
2120 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
2121 vlv_program_watermarks(dev_priv);
2122 mutex_unlock(&dev_priv->display.wm.wm_mutex);
2123 }
2124
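/*
 * i965: the plane and cursor watermarks are left at the hardcoded
 * value of 8; only the self-refresh watermarks are computed, and only
 * when a single pipe is enabled.
 */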
2125 static void i965_update_wm(struct drm_i915_private *dev_priv)
2126 {
2127 struct intel_crtc *crtc;
2128 int srwm = 1;
2129 int cursor_sr = 16;
2130 bool cxsr_enabled;
2131
2132 /* Calc sr entries for single-plane configs */
2133 crtc = single_enabled_crtc(dev_priv);
2134 if (crtc) {
2135 /* self-refresh has much higher latency */
2136 static const int sr_latency_ns = 12000;
2137 const struct drm_display_mode *pipe_mode =
2138 &crtc->config->hw.pipe_mode;
2139 const struct drm_framebuffer *fb =
2140 crtc->base.primary->state->fb;
2141 int pixel_rate = crtc->config->pixel_rate;
2142 int htotal = pipe_mode->crtc_htotal;
2143 int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2144 int cpp = fb->format->cpp[0];
2145 int entries;
2146
2147 entries = intel_wm_method2(pixel_rate, htotal,
2148 width, cpp, sr_latency_ns / 100);
2149 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
2150 srwm = I965_FIFO_SIZE - entries;
2151 if (srwm < 0)
2152 srwm = 1;
2153 srwm &= 0x1ff;
2154 drm_dbg_kms(&dev_priv->drm,
2155 "self-refresh entries: %d, wm: %d\n",
2156 entries, srwm);
2157
2158 entries = intel_wm_method2(pixel_rate, htotal,
2159 crtc->base.cursor->state->crtc_w, 4,
2160 sr_latency_ns / 100);
2161 entries = DIV_ROUND_UP(entries,
2162 i965_cursor_wm_info.cacheline_size) +
2163 i965_cursor_wm_info.guard_size;
2164
2165 cursor_sr = i965_cursor_wm_info.fifo_size - entries;
2166 if (cursor_sr > i965_cursor_wm_info.max_wm)
2167 cursor_sr = i965_cursor_wm_info.max_wm;
2168
2169 drm_dbg_kms(&dev_priv->drm,
2170 "self-refresh watermark: display plane %d "
2171 "cursor %d\n", srwm, cursor_sr);
2172
2173 cxsr_enabled = true;
2174 } else {
2175 cxsr_enabled = false;
2176 /* Turn off self refresh if both pipes are enabled */
2177 intel_set_memory_cxsr(dev_priv, false);
2178 }
2179
2180 drm_dbg_kms(&dev_priv->drm,
2181 "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2182 srwm);
2183
2184 /* 965 has limitations... */
2185 intel_uncore_write(&dev_priv->uncore, DSPFW1(dev_priv),
2186 FW_WM(srwm, SR) |
2187 FW_WM(8, CURSORB) |
2188 FW_WM(8, PLANEB) |
2189 FW_WM(8, PLANEA));
2190 intel_uncore_write(&dev_priv->uncore, DSPFW2(dev_priv),
2191 FW_WM(8, CURSORA) |
2192 FW_WM(8, PLANEC_OLD));
2193 /* update cursor SR watermark */
2194 intel_uncore_write(&dev_priv->uncore, DSPFW3(dev_priv),
2195 FW_WM(cursor_sr, CURSOR_SR));
2196
2197 if (cxsr_enabled)
2198 intel_set_memory_cxsr(dev_priv, true);
2199 }
2200
2201 #undef FW_WM
2202
2203 static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
2204 enum i9xx_plane_id i9xx_plane)
2205 {
2206 struct intel_display *display = &i915->display;
2207 struct intel_plane *plane;
2208
2209 for_each_intel_plane(&i915->drm, plane) {
2210 if (plane->id == PLANE_PRIMARY &&
2211 plane->i9xx_plane == i9xx_plane)
2212 return intel_crtc_for_pipe(display, plane->pipe);
2213 }
2214
2215 return NULL;
2216 }
2217
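/*
 * gen2/3: compute the FIFO watermarks for planes A and B from the
 * current FIFO split, plus a self-refresh watermark when exactly one
 * pipe is enabled (and, on 915GM, only with a tiled framebuffer).
 */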
2218 static void i9xx_update_wm(struct drm_i915_private *dev_priv)
2219 {
2220 const struct intel_watermark_params *wm_info;
2221 u32 fwater_lo;
2222 u32 fwater_hi;
2223 int cwm, srwm = 1;
2224 int fifo_size;
2225 int planea_wm, planeb_wm;
2226 struct intel_crtc *crtc;
2227
2228 if (IS_I945GM(dev_priv))
2229 wm_info = &i945_wm_info;
2230 else if (DISPLAY_VER(dev_priv) != 2)
2231 wm_info = &i915_wm_info;
2232 else
2233 wm_info = &i830_a_wm_info;
2234
2235 if (DISPLAY_VER(dev_priv) == 2)
2236 fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
2237 else
2238 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
2239 crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
2240 if (intel_crtc_active(crtc)) {
2241 const struct drm_framebuffer *fb =
2242 crtc->base.primary->state->fb;
2243 int cpp;
2244
2245 if (DISPLAY_VER(dev_priv) == 2)
2246 cpp = 4;
2247 else
2248 cpp = fb->format->cpp[0];
2249
2250 planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2251 wm_info, fifo_size, cpp,
2252 pessimal_latency_ns);
2253 } else {
2254 planea_wm = fifo_size - wm_info->guard_size;
2255 if (planea_wm > (long)wm_info->max_wm)
2256 planea_wm = wm_info->max_wm;
2257 }
2258
2259 if (DISPLAY_VER(dev_priv) == 2)
2260 wm_info = &i830_bc_wm_info;
2261
2262 if (DISPLAY_VER(dev_priv) == 2)
2263 fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
2264 else
2265 fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
2266 crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
2267 if (intel_crtc_active(crtc)) {
2268 const struct drm_framebuffer *fb =
2269 crtc->base.primary->state->fb;
2270 int cpp;
2271
2272 if (DISPLAY_VER(dev_priv) == 2)
2273 cpp = 4;
2274 else
2275 cpp = fb->format->cpp[0];
2276
2277 planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2278 wm_info, fifo_size, cpp,
2279 pessimal_latency_ns);
2280 } else {
2281 planeb_wm = fifo_size - wm_info->guard_size;
2282 if (planeb_wm > (long)wm_info->max_wm)
2283 planeb_wm = wm_info->max_wm;
2284 }
2285
2286 drm_dbg_kms(&dev_priv->drm,
2287 "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2288
2289 crtc = single_enabled_crtc(dev_priv);
2290 if (IS_I915GM(dev_priv) && crtc) {
2291 struct drm_gem_object *obj;
2292
2293 obj = intel_fb_bo(crtc->base.primary->state->fb);
2294
2295 /* self-refresh seems busted with untiled */
2296 if (!intel_bo_is_tiled(obj))
2297 crtc = NULL;
2298 }
2299
2300 /*
2301 * Overlay gets an aggressive default since video jitter is bad.
2302 */
2303 cwm = 2;
2304
2305 /* Play safe and disable self-refresh before adjusting watermarks. */
2306 intel_set_memory_cxsr(dev_priv, false);
2307
2308 /* Calc sr entries for single-plane configs */
2309 if (HAS_FW_BLC(dev_priv) && crtc) {
2310 /* self-refresh has much higher latency */
2311 static const int sr_latency_ns = 6000;
2312 const struct drm_display_mode *pipe_mode =
2313 &crtc->config->hw.pipe_mode;
2314 const struct drm_framebuffer *fb =
2315 crtc->base.primary->state->fb;
2316 int pixel_rate = crtc->config->pixel_rate;
2317 int htotal = pipe_mode->crtc_htotal;
2318 int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
2319 int cpp;
2320 int entries;
2321
2322 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
2323 cpp = 4;
2324 else
2325 cpp = fb->format->cpp[0];
2326
2327 entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
2328 sr_latency_ns / 100);
2329 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
2330 drm_dbg_kms(&dev_priv->drm,
2331 "self-refresh entries: %d\n", entries);
2332 srwm = wm_info->fifo_size - entries;
2333 if (srwm < 0)
2334 srwm = 1;
2335
2336 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
2337 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
2338 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
2339 else
2340 intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
2341 }
2342
2343 drm_dbg_kms(&dev_priv->drm,
2344 "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2345 planea_wm, planeb_wm, cwm, srwm);
2346
2347 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
2348 fwater_hi = (cwm & 0x1f);
2349
2350 /* Set request length to 8 cachelines per fetch */
2351 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
2352 fwater_hi = fwater_hi | (1 << 8);
2353
2354 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2355 intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);
2356
2357 if (crtc)
2358 intel_set_memory_cxsr(dev_priv, true);
2359 }
2360
2361 static void i845_update_wm(struct drm_i915_private *dev_priv)
2362 {
2363 struct intel_crtc *crtc;
2364 u32 fwater_lo;
2365 int planea_wm;
2366
2367 crtc = single_enabled_crtc(dev_priv);
2368 if (crtc == NULL)
2369 return;
2370
2371 planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
2372 &i845_wm_info,
2373 i845_get_fifo_size(dev_priv, PLANE_A),
2374 4, pessimal_latency_ns);
2375 fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
2376 fwater_lo |= (3<<8) | planea_wm;
2377
2378 drm_dbg_kms(&dev_priv->drm,
2379 "Setting FIFO watermarks - A: %d\n", planea_wm);
2380
2381 intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
2382 }
2383
2384 /* latency must be in 0.1us units. */
2385 static unsigned int ilk_wm_method1(unsigned int pixel_rate,
2386 unsigned int cpp,
2387 unsigned int latency)
2388 {
2389 unsigned int ret;
2390
2391 ret = intel_wm_method1(pixel_rate, cpp, latency);
2392 ret = DIV_ROUND_UP(ret, 64) + 2;
2393
2394 return ret;
2395 }
2396
2397 /* latency must be in 0.1us units. */
2398 static unsigned int ilk_wm_method2(unsigned int pixel_rate,
2399 unsigned int htotal,
2400 unsigned int width,
2401 unsigned int cpp,
2402 unsigned int latency)
2403 {
2404 unsigned int ret;
2405
2406 ret = intel_wm_method2(pixel_rate, htotal,
2407 width, cpp, latency);
2408 ret = DIV_ROUND_UP(ret, 64) + 2;
2409
2410 return ret;
2411 }
2412
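/*
 * FBC watermark: the primary watermark (in 64 byte units) converted
 * into full lines of the plane, plus 2.
 */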
2413 static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
2414 {
2415 /*
2416 * Neither of these should be possible since this function shouldn't be
2417 * called if the CRTC is off or the plane is invisible. But let's be
2418 * extra paranoid to avoid a potential divide-by-zero if we screw up
2419 * elsewhere in the driver.
2420 */
2421 if (WARN_ON(!cpp))
2422 return 0;
2423 if (WARN_ON(!horiz_pixels))
2424 return 0;
2425
2426 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
2427 }
2428
2429 struct ilk_wm_maximums {
2430 u16 pri;
2431 u16 spr;
2432 u16 cur;
2433 u16 fbc;
2434 };
2435
2436 /*
2437 * For both WM_PIPE and WM_LP.
2438 * mem_value must be in 0.1us units.
2439 */
2440 static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
2441 const struct intel_plane_state *plane_state,
2442 u32 mem_value, bool is_lp)
2443 {
2444 u32 method1, method2;
2445 int cpp;
2446
2447 if (mem_value == 0)
2448 return U32_MAX;
2449
2450 if (!intel_wm_plane_visible(crtc_state, plane_state))
2451 return 0;
2452
2453 cpp = plane_state->hw.fb->format->cpp[0];
2454
2455 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2456
2457 if (!is_lp)
2458 return method1;
2459
2460 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2461 crtc_state->hw.pipe_mode.crtc_htotal,
2462 drm_rect_width(&plane_state->uapi.src) >> 16,
2463 cpp, mem_value);
2464
2465 return min(method1, method2);
2466 }
2467
2468 /*
2469 * For both WM_PIPE and WM_LP.
2470 * mem_value must be in 0.1us units.
2471 */
2472 static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
2473 const struct intel_plane_state *plane_state,
2474 u32 mem_value)
2475 {
2476 u32 method1, method2;
2477 int cpp;
2478
2479 if (mem_value == 0)
2480 return U32_MAX;
2481
2482 if (!intel_wm_plane_visible(crtc_state, plane_state))
2483 return 0;
2484
2485 cpp = plane_state->hw.fb->format->cpp[0];
2486
2487 method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
2488 method2 = ilk_wm_method2(crtc_state->pixel_rate,
2489 crtc_state->hw.pipe_mode.crtc_htotal,
2490 drm_rect_width(&plane_state->uapi.src) >> 16,
2491 cpp, mem_value);
2492 return min(method1, method2);
2493 }
2494
2495 /*
2496 * For both WM_PIPE and WM_LP.
2497 * mem_value must be in 0.1us units.
2498 */
2499 static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
2500 const struct intel_plane_state *plane_state,
2501 u32 mem_value)
2502 {
2503 int cpp;
2504
2505 if (mem_value == 0)
2506 return U32_MAX;
2507
2508 if (!intel_wm_plane_visible(crtc_state, plane_state))
2509 return 0;
2510
2511 cpp = plane_state->hw.fb->format->cpp[0];
2512
2513 return ilk_wm_method2(crtc_state->pixel_rate,
2514 crtc_state->hw.pipe_mode.crtc_htotal,
2515 drm_rect_width(&plane_state->uapi.src) >> 16,
2516 cpp, mem_value);
2517 }
2518
2519 /* Only for WM_LP. */
2520 static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
2521 const struct intel_plane_state *plane_state,
2522 u32 pri_val)
2523 {
2524 int cpp;
2525
2526 if (!intel_wm_plane_visible(crtc_state, plane_state))
2527 return 0;
2528
2529 cpp = plane_state->hw.fb->format->cpp[0];
2530
2531 return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
2532 cpp);
2533 }
2534
2535 static unsigned int
2536 ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
2537 {
2538 if (DISPLAY_VER(dev_priv) >= 8)
2539 return 3072;
2540 else if (DISPLAY_VER(dev_priv) >= 7)
2541 return 768;
2542 else
2543 return 512;
2544 }
2545
2546 static unsigned int
2547 ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
2548 int level, bool is_sprite)
2549 {
2550 if (DISPLAY_VER(dev_priv) >= 8)
2551 /* BDW primary/sprite plane watermarks */
2552 return level == 0 ? 255 : 2047;
2553 else if (DISPLAY_VER(dev_priv) >= 7)
2554 /* IVB/HSW primary/sprite plane watermarks */
2555 return level == 0 ? 127 : 1023;
2556 else if (!is_sprite)
2557 /* ILK/SNB primary plane watermarks */
2558 return level == 0 ? 127 : 511;
2559 else
2560 /* ILK/SNB sprite plane watermarks */
2561 return level == 0 ? 63 : 255;
2562 }
2563
2564 static unsigned int
2565 ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
2566 {
2567 if (DISPLAY_VER(dev_priv) >= 7)
2568 return level == 0 ? 63 : 255;
2569 else
2570 return level == 0 ? 31 : 63;
2571 }
2572
2573 static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
2574 {
2575 if (DISPLAY_VER(dev_priv) >= 8)
2576 return 31;
2577 else
2578 return 15;
2579 }
2580
2581 /* Calculate the maximum primary/sprite plane watermark */
2582 static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
2583 int level,
2584 const struct intel_wm_config *config,
2585 enum intel_ddb_partitioning ddb_partitioning,
2586 bool is_sprite)
2587 {
2588 unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
2589
2590 /* if sprites aren't enabled, sprites get nothing */
2591 if (is_sprite && !config->sprites_enabled)
2592 return 0;
2593
2594 /* HSW allows LP1+ watermarks even with multiple pipes */
2595 if (level == 0 || config->num_pipes_active > 1) {
2596 fifo_size /= INTEL_NUM_PIPES(dev_priv);
2597
2598 /*
2599 * For some reason the non self refresh
2600 * FIFO size is only half of the self
2601 * refresh FIFO size on ILK/SNB.
2602 */
2603 if (DISPLAY_VER(dev_priv) < 7)
2604 fifo_size /= 2;
2605 }
2606
2607 if (config->sprites_enabled) {
2608 /* level 0 is always calculated with 1:1 split */
2609 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2610 if (is_sprite)
2611 fifo_size *= 5;
2612 fifo_size /= 6;
2613 } else {
2614 fifo_size /= 2;
2615 }
2616 }
2617
2618 /* clamp to max that the registers can hold */
2619 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite));
2620 }
2621
2622 /* Calculate the maximum cursor plane watermark */
2623 static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
2624 int level,
2625 const struct intel_wm_config *config)
2626 {
2627 /* HSW LP1+ watermarks w/ multiple pipes */
2628 if (level > 0 && config->num_pipes_active > 1)
2629 return 64;
2630
2631 /* otherwise just report max that registers can hold */
2632 return ilk_cursor_wm_reg_max(dev_priv, level);
2633 }
2634
2635 static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
2636 int level,
2637 const struct intel_wm_config *config,
2638 enum intel_ddb_partitioning ddb_partitioning,
2639 struct ilk_wm_maximums *max)
2640 {
2641 max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
2642 max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
2643 max->cur = ilk_cursor_wm_max(dev_priv, level, config);
2644 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2645 }
2646
2647 static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
2648 int level,
2649 struct ilk_wm_maximums *max)
2650 {
2651 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false);
2652 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true);
2653 max->cur = ilk_cursor_wm_reg_max(dev_priv, level);
2654 max->fbc = ilk_fbc_wm_reg_max(dev_priv);
2655 }
2656
2657 static bool ilk_validate_wm_level(struct drm_i915_private *i915,
2658 int level,
2659 const struct ilk_wm_maximums *max,
2660 struct intel_wm_level *result)
2661 {
2662 bool ret;
2663
2664 /* already determined to be invalid? */
2665 if (!result->enable)
2666 return false;
2667
2668 result->enable = result->pri_val <= max->pri &&
2669 result->spr_val <= max->spr &&
2670 result->cur_val <= max->cur;
2671
2672 ret = result->enable;
2673
2674 /*
2675 * HACK until we can pre-compute everything,
2676 * and thus fail gracefully if LP0 watermarks
2677 * are exceeded...
2678 */
2679 if (level == 0 && !result->enable) {
2680 if (result->pri_val > max->pri)
2681 drm_dbg_kms(&i915->drm,
2682 "Primary WM%d too large %u (max %u)\n",
2683 level, result->pri_val, max->pri);
2684 if (result->spr_val > max->spr)
2685 drm_dbg_kms(&i915->drm,
2686 "Sprite WM%d too large %u (max %u)\n",
2687 level, result->spr_val, max->spr);
2688 if (result->cur_val > max->cur)
2689 drm_dbg_kms(&i915->drm,
2690 "Cursor WM%d too large %u (max %u)\n",
2691 level, result->cur_val, max->cur);
2692
2693 result->pri_val = min_t(u32, result->pri_val, max->pri);
2694 result->spr_val = min_t(u32, result->spr_val, max->spr);
2695 result->cur_val = min_t(u32, result->cur_val, max->cur);
2696 result->enable = true;
2697 }
2698
2699 return ret;
2700 }
2701
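/*
 * Compute the watermark values for a single level from the plane
 * states and the per-level memory latencies (the WM1+ latencies are
 * stored in 0.5us units and scaled to 0.1us units here).
 */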
2702 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2703 const struct intel_crtc *crtc,
2704 int level,
2705 struct intel_crtc_state *crtc_state,
2706 const struct intel_plane_state *pristate,
2707 const struct intel_plane_state *sprstate,
2708 const struct intel_plane_state *curstate,
2709 struct intel_wm_level *result)
2710 {
2711 u16 pri_latency = dev_priv->display.wm.pri_latency[level];
2712 u16 spr_latency = dev_priv->display.wm.spr_latency[level];
2713 u16 cur_latency = dev_priv->display.wm.cur_latency[level];
2714
2715 /* WM1+ latency values stored in 0.5us units */
2716 if (level > 0) {
2717 pri_latency *= 5;
2718 spr_latency *= 5;
2719 cur_latency *= 5;
2720 }
2721
2722 if (pristate) {
2723 result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
2724 pri_latency, level);
2725 result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
2726 }
2727
2728 if (sprstate)
2729 result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
2730
2731 if (curstate)
2732 result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
2733
2734 result->enable = true;
2735 }
2736
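/*
 * Read the per-level memory latency values from the MCH_SSKPD register,
 * preferring the new WM0 field when it is non-zero.
 */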
2737 static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2738 {
2739 u64 sskpd;
2740
2741 i915->display.wm.num_levels = 5;
2742
2743 sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD);
2744
2745 wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
2746 if (wm[0] == 0)
2747 wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
2748 wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
2749 wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
2750 wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
2751 wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
2752 }
2753
2754 static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2755 {
2756 u32 sskpd;
2757
2758 i915->display.wm.num_levels = 4;
2759
2760 sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD);
2761
2762 wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
2763 wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
2764 wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
2765 wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
2766 }
2767
2768 static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
2769 {
2770 u32 mltr;
2771
2772 i915->display.wm.num_levels = 3;
2773
2774 mltr = intel_uncore_read(&i915->uncore, MLTR_ILK);
2775
2776 /* ILK primary LP0 latency is 700 ns */
2777 wm[0] = 7;
2778 wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
2779 wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
2780 }
2781
2782 static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
2783 u16 wm[5])
2784 {
2785 /* ILK sprite LP0 latency is 1300 ns */
2786 if (DISPLAY_VER(dev_priv) == 5)
2787 wm[0] = 13;
2788 }
2789
2790 static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
2791 u16 wm[5])
2792 {
2793 /* ILK cursor LP0 latency is 1300 ns */
2794 if (DISPLAY_VER(dev_priv) == 5)
2795 wm[0] = 13;
2796 }
2797
2798 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2799 u16 wm[5], u16 min)
2800 {
2801 int level;
2802
2803 if (wm[0] >= min)
2804 return false;
2805
2806 wm[0] = max(wm[0], min);
2807 for (level = 1; level < dev_priv->display.wm.num_levels; level++)
2808 wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
2809
2810 return true;
2811 }
2812
2813 static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
2814 {
2815 bool changed;
2816
2817 /*
2818 * The BIOS provided WM memory latency values are often
2819 * inadequate for high resolution displays. Adjust them.
2820 */
2821 changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
2822 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
2823 changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);
2824
2825 if (!changed)
2826 return;
2827
2828 drm_dbg_kms(&dev_priv->drm,
2829 "WM latency values increased to avoid potential underruns\n");
2830 intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2831 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2832 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2833 }
2834
2835 static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
2836 {
2837 /*
2838 * On some SNB machines (Thinkpad X220 Tablet at least)
2839 * LP3 usage can cause vblank interrupts to be lost.
2840 * The DEIIR bit will go high but it looks like the CPU
2841 * never gets interrupted.
2842 *
2843 * It's not clear whether other interrupt sources could
2844 * be affected or if this is somehow limited to vblank
2845 * interrupts only. To play it safe we disable LP3
2846 * watermarks entirely.
2847 */
2848 if (dev_priv->display.wm.pri_latency[3] == 0 &&
2849 dev_priv->display.wm.spr_latency[3] == 0 &&
2850 dev_priv->display.wm.cur_latency[3] == 0)
2851 return;
2852
2853 dev_priv->display.wm.pri_latency[3] = 0;
2854 dev_priv->display.wm.spr_latency[3] = 0;
2855 dev_priv->display.wm.cur_latency[3] = 0;
2856
2857 drm_dbg_kms(&dev_priv->drm,
2858 "LP3 watermarks disabled due to potential for lost interrupts\n");
2859 intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2860 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2861 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2862 }
2863
2864 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
2865 {
2866 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
2867 hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2868 else if (DISPLAY_VER(dev_priv) >= 6)
2869 snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2870 else
2871 ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
2872
2873 memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
2874 sizeof(dev_priv->display.wm.pri_latency));
2875 memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
2876 sizeof(dev_priv->display.wm.pri_latency));
2877
2878 intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
2879 intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);
2880
2881 intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
2882 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
2883 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
2884
2885 if (DISPLAY_VER(dev_priv) == 6) {
2886 snb_wm_latency_quirk(dev_priv);
2887 snb_wm_lp3_irq_quirk(dev_priv);
2888 }
2889 }
2890
2891 static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
2892 struct intel_pipe_wm *pipe_wm)
2893 {
2894 /* LP0 watermark maximums depend on this pipe alone */
2895 const struct intel_wm_config config = {
2896 .num_pipes_active = 1,
2897 .sprites_enabled = pipe_wm->sprites_enabled,
2898 .sprites_scaled = pipe_wm->sprites_scaled,
2899 };
2900 struct ilk_wm_maximums max;
2901
2902 /* LP0 watermarks always use 1/2 DDB partitioning */
2903 ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
2904
2905 /* At least LP0 must be valid */
2906 if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
2907 drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
2908 return false;
2909 }
2910
2911 return true;
2912 }
2913
2914 /* Compute new watermarks for the pipe */
2915 static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
2916 struct intel_crtc *crtc)
2917 {
2918 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2919 struct intel_crtc_state *crtc_state =
2920 intel_atomic_get_new_crtc_state(state, crtc);
2921 struct intel_pipe_wm *pipe_wm;
2922 struct intel_plane *plane;
2923 const struct intel_plane_state *plane_state;
2924 const struct intel_plane_state *pristate = NULL;
2925 const struct intel_plane_state *sprstate = NULL;
2926 const struct intel_plane_state *curstate = NULL;
2927 struct ilk_wm_maximums max;
2928 int level, usable_level;
2929
2930 pipe_wm = &crtc_state->wm.ilk.optimal;
2931
2932 intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
2933 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
2934 pristate = plane_state;
2935 else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
2936 sprstate = plane_state;
2937 else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
2938 curstate = plane_state;
2939 }
2940
2941 pipe_wm->pipe_enabled = crtc_state->hw.active;
2942 pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
2943 pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
2944
2945 usable_level = dev_priv->display.wm.num_levels - 1;
2946
2947 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2948 if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
2949 usable_level = 1;
2950
2951 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2952 if (pipe_wm->sprites_scaled)
2953 usable_level = 0;
2954
2955 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
2956 ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
2957 pristate, sprstate, curstate, &pipe_wm->wm[0]);
2958
2959 if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
2960 return -EINVAL;
2961
2962 ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
2963
2964 for (level = 1; level <= usable_level; level++) {
2965 struct intel_wm_level *wm = &pipe_wm->wm[level];
2966
2967 ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
2968 pristate, sprstate, curstate, wm);
2969
2970 /*
2971 * Disable any watermark level that exceeds the
2972 * register maximums since such watermarks are
2973 * always invalid.
2974 */
2975 if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
2976 memset(wm, 0, sizeof(*wm));
2977 break;
2978 }
2979 }
2980
2981 return 0;
2982 }
2983
2984 /*
2985 * Build a set of 'intermediate' watermark values that satisfy both the old
2986 * state and the new state. These can be programmed to the hardware
2987 * immediately.
2988 */
2989 static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
2990 struct intel_crtc *crtc)
2991 {
2992 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2993 struct intel_crtc_state *new_crtc_state =
2994 intel_atomic_get_new_crtc_state(state, crtc);
2995 const struct intel_crtc_state *old_crtc_state =
2996 intel_atomic_get_old_crtc_state(state, crtc);
2997 struct intel_pipe_wm *intermediate = &new_crtc_state->wm.ilk.intermediate;
2998 const struct intel_pipe_wm *optimal = &new_crtc_state->wm.ilk.optimal;
2999 const struct intel_pipe_wm *active = &old_crtc_state->wm.ilk.optimal;
3000 int level;
3001
3002 /*
3003 * Start with the final, target watermarks, then combine with the
3004 * currently active watermarks to get values that are safe both before
3005 * and after the vblank.
3006 */
3007 *intermediate = *optimal;
3008 if (!new_crtc_state->hw.active ||
3009 intel_crtc_needs_modeset(new_crtc_state) ||
3010 state->skip_intermediate_wm)
3011 return 0;
3012
3013 intermediate->pipe_enabled |= active->pipe_enabled;
3014 intermediate->sprites_enabled |= active->sprites_enabled;
3015 intermediate->sprites_scaled |= active->sprites_scaled;
3016
3017 for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
3018 struct intel_wm_level *intermediate_wm = &intermediate->wm[level];
3019 const struct intel_wm_level *active_wm = &active->wm[level];
3020
3021 intermediate_wm->enable &= active_wm->enable;
3022 intermediate_wm->pri_val = max(intermediate_wm->pri_val,
3023 active_wm->pri_val);
3024 intermediate_wm->spr_val = max(intermediate_wm->spr_val,
3025 active_wm->spr_val);
3026 intermediate_wm->cur_val = max(intermediate_wm->cur_val,
3027 active_wm->cur_val);
3028 intermediate_wm->fbc_val = max(intermediate_wm->fbc_val,
3029 active_wm->fbc_val);
3030 }
3031
3032 /*
3033 * We need to make sure that these merged watermark values are
3034 * actually a valid configuration themselves. If they're not,
3035 * there's no safe way to transition from the old state to
3036 * the new state, so we need to fail the atomic transaction.
3037 */
3038 if (!ilk_validate_pipe_wm(dev_priv, intermediate))
3039 return -EINVAL;
3040
3041 /*
3042 * If our intermediate WM are identical to the final WM, then we can
3043 * omit the post-vblank programming; only update if it's different.
3044 */
3045 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
3046 new_crtc_state->wm.need_postvbl_update = true;
3047
3048 return 0;
3049 }
3050
3051 static int ilk_compute_watermarks(struct intel_atomic_state *state,
3052 struct intel_crtc *crtc)
3053 {
3054 int ret;
3055
3056 ret = ilk_compute_pipe_wm(state, crtc);
3057 if (ret)
3058 return ret;
3059
3060 ret = ilk_compute_intermediate_wm(state, crtc);
3061 if (ret)
3062 return ret;
3063
3064 return 0;
3065 }
3066
3067 /*
3068 * Merge the watermarks from all active pipes for a specific level.
3069 */
3070 static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
3071 int level,
3072 struct intel_wm_level *ret_wm)
3073 {
3074 const struct intel_crtc *crtc;
3075
3076 ret_wm->enable = true;
3077
3078 for_each_intel_crtc(&dev_priv->drm, crtc) {
3079 const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
3080 const struct intel_wm_level *wm = &active->wm[level];
3081
3082 if (!active->pipe_enabled)
3083 continue;
3084
3085 /*
3086 * The watermark values may have been used in the past,
3087 * so we must maintain them in the registers for some
3088 * time even if the level is now disabled.
3089 */
3090 if (!wm->enable)
3091 ret_wm->enable = false;
3092
3093 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
3094 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
3095 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
3096 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
3097 }
3098 }
3099
3100 /*
3101 * Merge all low power watermarks for all active pipes.
3102 */
3103 static void ilk_wm_merge(struct drm_i915_private *dev_priv,
3104 const struct intel_wm_config *config,
3105 const struct ilk_wm_maximums *max,
3106 struct intel_pipe_wm *merged)
3107 {
3108 int level, num_levels = dev_priv->display.wm.num_levels;
3109 int last_enabled_level = num_levels - 1;
3110
3111 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
3112 if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
3113 config->num_pipes_active > 1)
3114 last_enabled_level = 0;
3115
3116 /* ILK: FBC WM must be disabled always */
3117 merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;
3118
3119 /* merge each WM1+ level */
3120 for (level = 1; level < num_levels; level++) {
3121 struct intel_wm_level *wm = &merged->wm[level];
3122
3123 ilk_merge_wm_level(dev_priv, level, wm);
3124
3125 if (level > last_enabled_level)
3126 wm->enable = false;
3127 else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
3128 /* make sure all following levels get disabled */
3129 last_enabled_level = level - 1;
3130
3131 /*
3132 * The spec says it is preferred to disable
3133 * FBC WMs instead of disabling a WM level.
3134 */
3135 if (wm->fbc_val > max->fbc) {
3136 if (wm->enable)
3137 merged->fbc_wm_enabled = false;
3138 wm->fbc_val = 0;
3139 }
3140 }
3141
3142 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
3143 if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
3144 dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
3145 for (level = 2; level < num_levels; level++) {
3146 struct intel_wm_level *wm = &merged->wm[level];
3147
3148 wm->enable = false;
3149 }
3150 }
3151 }
3152
3153 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
3154 {
3155 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
3156 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
3157 }
3158
3159 /* The value we need to program into the WM_LPx latency field */
3160 static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
3161 int level)
3162 {
3163 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3164 return 2 * level;
3165 else
3166 return dev_priv->display.wm.pri_latency[level];
3167 }
3168
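/*
 * Translate the merged LP1+ watermarks and each pipe's LP0 watermark
 * into the register words stored in ilk_wm_values (wm_lp[], wm_lp_spr[]
 * and wm_pipe[]).
 */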
3169 static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
3170 const struct intel_pipe_wm *merged,
3171 enum intel_ddb_partitioning partitioning,
3172 struct ilk_wm_values *results)
3173 {
3174 struct intel_crtc *crtc;
3175 int level, wm_lp;
3176
3177 results->enable_fbc_wm = merged->fbc_wm_enabled;
3178 results->partitioning = partitioning;
3179
3180 /* LP1+ register values */
3181 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3182 const struct intel_wm_level *r;
3183
3184 level = ilk_wm_lp_to_level(wm_lp, merged);
3185
3186 r = &merged->wm[level];
3187
3188 /*
3189 * Maintain the watermark values even if the level is
3190 * disabled. Doing otherwise could cause underruns.
3191 */
3192 results->wm_lp[wm_lp - 1] =
3193 WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
3194 WM_LP_PRIMARY(r->pri_val) |
3195 WM_LP_CURSOR(r->cur_val);
3196
3197 if (r->enable)
3198 results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
3199
3200 if (DISPLAY_VER(dev_priv) >= 8)
3201 results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
3202 else
3203 results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
3204
3205 results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
3206
3207 /*
3208 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
3209 * level is disabled. Doing otherwise could cause underruns.
3210 */
3211 if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
3212 drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
3213 results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
3214 }
3215 }
3216
3217 /* LP0 register values */
3218 for_each_intel_crtc(&dev_priv->drm, crtc) {
3219 enum pipe pipe = crtc->pipe;
3220 const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
3221 const struct intel_wm_level *r = &pipe_wm->wm[0];
3222
3223 if (drm_WARN_ON(&dev_priv->drm, !r->enable))
3224 continue;
3225
3226 results->wm_pipe[pipe] =
3227 WM0_PIPE_PRIMARY(r->pri_val) |
3228 WM0_PIPE_SPRITE(r->spr_val) |
3229 WM0_PIPE_CURSOR(r->cur_val);
3230 }
3231 }
3232
3233 /*
3234 * Find the result with the deepest level enabled. If both reach the same
3235 * level, prefer the one with FBC watermarks enabled, and r1 if they tie.
3236 */
3237 static struct intel_pipe_wm *
3238 ilk_find_best_result(struct drm_i915_private *dev_priv,
3239 struct intel_pipe_wm *r1,
3240 struct intel_pipe_wm *r2)
3241 {
3242 int level, level1 = 0, level2 = 0;
3243
3244 for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
3245 if (r1->wm[level].enable)
3246 level1 = level;
3247 if (r2->wm[level].enable)
3248 level2 = level;
3249 }
3250
3251 if (level1 == level2) {
3252 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
3253 return r2;
3254 else
3255 return r1;
3256 } else if (level1 > level2) {
3257 return r1;
3258 } else {
3259 return r2;
3260 }
3261 }
3262
3263 /* dirty bits used to track which watermarks need changes */
3264 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
3265 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
3266 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
3267 #define WM_DIRTY_FBC (1 << 24)
3268 #define WM_DIRTY_DDB (1 << 25)
3269
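/*
 * Compare the old and new register values and return a WM_DIRTY_* mask of
 * everything that needs to be rewritten. Any pipe, FBC or DDB partitioning
 * change also forces all LP1+ watermarks to be reprogrammed.
 */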
3270 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
3271 const struct ilk_wm_values *old,
3272 const struct ilk_wm_values *new)
3273 {
3274 unsigned int dirty = 0;
3275 enum pipe pipe;
3276 int wm_lp;
3277
3278 for_each_pipe(dev_priv, pipe) {
3279 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
3280 dirty |= WM_DIRTY_PIPE(pipe);
3281 /* Must disable LP1+ watermarks too */
3282 dirty |= WM_DIRTY_LP_ALL;
3283 }
3284 }
3285
3286 if (old->enable_fbc_wm != new->enable_fbc_wm) {
3287 dirty |= WM_DIRTY_FBC;
3288 /* Must disable LP1+ watermarks too */
3289 dirty |= WM_DIRTY_LP_ALL;
3290 }
3291
3292 if (old->partitioning != new->partitioning) {
3293 dirty |= WM_DIRTY_DDB;
3294 /* Must disable LP1+ watermarks too */
3295 dirty |= WM_DIRTY_LP_ALL;
3296 }
3297
3298 /* LP1+ watermarks already deemed dirty, no need to continue */
3299 if (dirty & WM_DIRTY_LP_ALL)
3300 return dirty;
3301
3302 /* Find the lowest numbered LP1+ watermark in need of an update... */
3303 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
3304 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
3305 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
3306 break;
3307 }
3308
3309 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
3310 for (; wm_lp <= 3; wm_lp++)
3311 dirty |= WM_DIRTY_LP(wm_lp);
3312
3313 return dirty;
3314 }
3315
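/*
 * Clear the enable bit of every currently enabled LP1+ watermark marked
 * dirty, and report whether anything actually changed.
 */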
3316 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
3317 unsigned int dirty)
3318 {
3319 struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
3320 bool changed = false;
3321
3322 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
3323 previous->wm_lp[2] &= ~WM_LP_ENABLE;
3324 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
3325 changed = true;
3326 }
3327 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
3328 previous->wm_lp[1] &= ~WM_LP_ENABLE;
3329 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
3330 changed = true;
3331 }
3332 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
3333 previous->wm_lp[0] &= ~WM_LP_ENABLE;
3334 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
3335 changed = true;
3336 }
3337
3338 /*
3339 * Don't touch WM_LP_SPRITE_ENABLE here.
3340 * Doing so could cause underruns.
3341 */
3342
3343 return changed;
3344 }
3345
3346 /*
3347 * The spec says we shouldn't write when we don't need to, because every write
3348 * causes WMs to be re-evaluated, expending some power.
3349 */
3350 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
3351 struct ilk_wm_values *results)
3352 {
3353 struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
3354 unsigned int dirty;
3355
3356 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
3357 if (!dirty)
3358 return;
3359
3360 _ilk_disable_lp_wm(dev_priv, dirty);
3361
3362 if (dirty & WM_DIRTY_PIPE(PIPE_A))
3363 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
3364 if (dirty & WM_DIRTY_PIPE(PIPE_B))
3365 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
3366 if (dirty & WM_DIRTY_PIPE(PIPE_C))
3367 intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
3368
3369 if (dirty & WM_DIRTY_DDB) {
3370 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3371 intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
3372 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
3373 WM_MISC_DATA_PARTITION_5_6);
3374 else
3375 intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
3376 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
3377 DISP_DATA_PARTITION_5_6);
3378 }
3379
3380 if (dirty & WM_DIRTY_FBC)
3381 intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
3382 results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);
3383
3384 if (dirty & WM_DIRTY_LP(1) &&
3385 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
3386 intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);
3387
3388 if (DISPLAY_VER(dev_priv) >= 7) {
3389 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
3390 intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
3391 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
3392 intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
3393 }
3394
3395 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
3396 intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
3397 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
3398 intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
3399 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
3400 intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);
3401
3402 dev_priv->display.wm.hw = *results;
3403 }
3404
3405 bool ilk_disable_cxsr(struct drm_i915_private *dev_priv)
3406 {
3407 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3408 }
3409
3410 static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
3411 struct intel_wm_config *config)
3412 {
3413 struct intel_crtc *crtc;
3414
3415 /* Compute the currently _active_ config */
3416 for_each_intel_crtc(&dev_priv->drm, crtc) {
3417 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
3418
3419 if (!wm->pipe_enabled)
3420 continue;
3421
3422 config->sprites_enabled |= wm->sprites_enabled;
3423 config->sprites_scaled |= wm->sprites_scaled;
3424 config->num_pipes_active++;
3425 }
3426 }
3427
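/*
 * Recompute the device-wide LP watermarks from the currently active pipes:
 * merge them for the 1/2 DDB partitioning (and for 5/6 where usable), pick
 * whichever result enables the deepest level, and write it to the hardware.
 */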
3428 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3429 {
3430 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3431 struct ilk_wm_maximums max;
3432 struct intel_wm_config config = {};
3433 struct ilk_wm_values results = {};
3434 enum intel_ddb_partitioning partitioning;
3435
3436 ilk_compute_wm_config(dev_priv, &config);
3437
3438 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
3439 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
3440
3441 /* 5/6 split only in single pipe config on IVB+ */
3442 if (DISPLAY_VER(dev_priv) >= 7 &&
3443 config.num_pipes_active == 1 && config.sprites_enabled) {
3444 ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
3445 ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
3446
3447 best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
3448 } else {
3449 best_lp_wm = &lp_wm_1_2;
3450 }
3451
3452 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3453 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3454
3455 ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
3456
3457 ilk_write_wm_values(dev_priv, &results);
3458 }
3459
3460 static void ilk_initial_watermarks(struct intel_atomic_state *state,
3461 struct intel_crtc *crtc)
3462 {
3463 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3464 const struct intel_crtc_state *crtc_state =
3465 intel_atomic_get_new_crtc_state(state, crtc);
3466
3467 mutex_lock(&dev_priv->display.wm.wm_mutex);
3468 crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
3469 ilk_program_watermarks(dev_priv);
3470 mutex_unlock(&dev_priv->display.wm.wm_mutex);
3471 }
3472
3473 static void ilk_optimize_watermarks(struct intel_atomic_state *state,
3474 struct intel_crtc *crtc)
3475 {
3476 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3477 const struct intel_crtc_state *crtc_state =
3478 intel_atomic_get_new_crtc_state(state, crtc);
3479
3480 if (!crtc_state->wm.need_postvbl_update)
3481 return;
3482
3483 mutex_lock(&dev_priv->display.wm.wm_mutex);
3484 crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
3485 ilk_program_watermarks(dev_priv);
3486 mutex_unlock(&dev_priv->display.wm.wm_mutex);
3487 }
3488
3489 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
3490 {
3491 struct drm_device *dev = crtc->base.dev;
3492 struct drm_i915_private *dev_priv = to_i915(dev);
3493 struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
3494 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
3495 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
3496 enum pipe pipe = crtc->pipe;
3497
3498 hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));
3499
3500 memset(active, 0, sizeof(*active));
3501
3502 active->pipe_enabled = crtc->active;
3503
3504 if (active->pipe_enabled) {
3505 u32 tmp = hw->wm_pipe[pipe];
3506
3507 /*
3508 * For active pipes the LP0 watermark is marked as
3509 * enabled, and the LP1+ watermarks as disabled since
3510 * we can't really reverse compute them when
3511 * multiple pipes are active.
3512 */
3513 active->wm[0].enable = true;
3514 active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
3515 active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
3516 active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
3517 } else {
3518 int level;
3519
3520 /*
3521 * For inactive pipes, all watermark levels
3522 * should be marked as enabled but zeroed,
3523 * which is what we'd compute them to be.
3524 */
3525 for (level = 0; level < dev_priv->display.wm.num_levels; level++)
3526 active->wm[level].enable = true;
3527 }
3528
3529 crtc->wm.active.ilk = *active;
3530 }
3531
3532 static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
3533 {
3534 struct drm_plane *plane;
3535 struct intel_crtc *crtc;
3536
3537 for_each_intel_crtc(state->dev, crtc) {
3538 struct intel_crtc_state *crtc_state;
3539
3540 crtc_state = intel_atomic_get_crtc_state(state, crtc);
3541 if (IS_ERR(crtc_state))
3542 return PTR_ERR(crtc_state);
3543
3544 if (crtc_state->hw.active) {
3545 /*
3546 * Preserve the inherited flag to avoid
3547 * taking the full modeset path.
3548 */
3549 crtc_state->inherited = true;
3550 }
3551 }
3552
3553 drm_for_each_plane(plane, state->dev) {
3554 struct drm_plane_state *plane_state;
3555
3556 plane_state = drm_atomic_get_plane_state(state, plane);
3557 if (IS_ERR(plane_state))
3558 return PTR_ERR(plane_state);
3559 }
3560
3561 return 0;
3562 }
3563
3564 /*
3565 * Calculate what we think the watermarks should be for the state we've read
3566 * out of the hardware and then immediately program those watermarks so that
3567 * we ensure the hardware settings match our internal state.
3568 *
3569 * We can calculate what we think WM's should be by creating a duplicate of the
3570 * current state (which was constructed during hardware readout) and running it
3571 * through the atomic check code to calculate new watermark values in the
3572 * state object.
3573 */
3574 void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
3575 {
3576 struct drm_atomic_state *state;
3577 struct intel_atomic_state *intel_state;
3578 struct intel_crtc *crtc;
3579 struct intel_crtc_state *crtc_state;
3580 struct drm_modeset_acquire_ctx ctx;
3581 int ret;
3582 int i;
3583
3584 /* Only supported on platforms that use atomic watermark design */
3585 if (!dev_priv->display.funcs.wm->optimize_watermarks)
3586 return;
3587
3588 if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
3589 return;
3590
3591 state = drm_atomic_state_alloc(&dev_priv->drm);
3592 if (drm_WARN_ON(&dev_priv->drm, !state))
3593 return;
3594
3595 intel_state = to_intel_atomic_state(state);
3596
3597 drm_modeset_acquire_init(&ctx, 0);
3598
3599 state->acquire_ctx = &ctx;
3600 to_intel_atomic_state(state)->internal = true;
3601
3602 retry:
3603 /*
3604 * Hardware readout is the only time we don't want to calculate
3605 * intermediate watermarks (since we don't trust the current
3606 * watermarks).
3607 */
3608 if (!HAS_GMCH(dev_priv))
3609 intel_state->skip_intermediate_wm = true;
3610
3611 ret = ilk_sanitize_watermarks_add_affected(state);
3612 if (ret)
3613 goto fail;
3614
3615 ret = intel_atomic_check(&dev_priv->drm, state);
3616 if (ret)
3617 goto fail;
3618
3619 /* Write calculated watermark values back */
3620 for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
3621 crtc_state->wm.need_postvbl_update = true;
3622 intel_optimize_watermarks(intel_state, crtc);
3623
3624 to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
3625 }
3626
3627 fail:
3628 if (ret == -EDEADLK) {
3629 drm_atomic_state_clear(state);
3630 drm_modeset_backoff(&ctx);
3631 goto retry;
3632 }
3633
3634 /*
3635 * If we fail here, it means that the hardware appears to be
3636 * programmed in a way that shouldn't be possible, given our
3637 * understanding of watermark requirements. This might mean a
3638 * mistake in the hardware readout code or a mistake in the
3639 * watermark calculations for a given platform. Raise a WARN
3640 * so that this is noticeable.
3641 *
3642 * If this actually happens, we'll have to just leave the
3643 * BIOS-programmed watermarks untouched and hope for the best.
3644 */
3645 drm_WARN(&dev_priv->drm, ret,
3646 "Could not determine valid watermarks for inherited state\n");
3647
3648 drm_atomic_state_put(state);
3649
3650 drm_modeset_drop_locks(&ctx);
3651 drm_modeset_acquire_fini(&ctx);
3652 }
3653
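/* Extract a single watermark field from a DSPFW register value */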
3654 #define _FW_WM(value, plane) \
3655 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
3656 #define _FW_WM_VLV(value, plane) \
3657 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)
3658
3659 static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
3660 struct g4x_wm_values *wm)
3661 {
3662 u32 tmp;
3663
3664 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
3665 wm->sr.plane = _FW_WM(tmp, SR);
3666 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
3667 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
3668 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);
3669
3670 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
3671 wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
3672 wm->sr.fbc = _FW_WM(tmp, FBC_SR);
3673 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
3674 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
3675 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
3676 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);
3677
3678 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
3679 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
3680 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3681 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
3682 wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
3683 }
3684
3685 static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
3686 struct vlv_wm_values *wm)
3687 {
3688 enum pipe pipe;
3689 u32 tmp;
3690
3691 for_each_pipe(dev_priv, pipe) {
3692 tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe));
3693
3694 wm->ddl[pipe].plane[PLANE_PRIMARY] =
3695 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3696 wm->ddl[pipe].plane[PLANE_CURSOR] =
3697 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3698 wm->ddl[pipe].plane[PLANE_SPRITE0] =
3699 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3700 wm->ddl[pipe].plane[PLANE_SPRITE1] =
3701 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
3702 }
3703
3704 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1(dev_priv));
3705 wm->sr.plane = _FW_WM(tmp, SR);
3706 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
3707 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
3708 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
3709
3710 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2(dev_priv));
3711 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
3712 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
3713 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
3714
3715 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3(dev_priv));
3716 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
3717
3718 if (IS_CHERRYVIEW(dev_priv)) {
3719 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV);
3720 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
3721 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
3722
3723 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV);
3724 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
3725 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
3726
3727 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV);
3728 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
3729 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
3730
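/*
 * DSPHOWM holds the high bits extending the watermark fields read above
 * (SR uses 9 low bits, the other planes 8).
 */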
3731 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
3732 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3733 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
3734 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
3735 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
3736 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3737 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3738 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
3739 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3740 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3741 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
3742 } else {
3743 tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7);
3744 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
3745 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
3746
3747 tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM);
3748 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
3749 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
3750 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
3751 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
3752 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
3753 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
3754 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
3755 }
3756 }
3757
3758 #undef _FW_WM
3759 #undef _FW_WM_VLV
3760
3761 static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
3762 {
3763 struct g4x_wm_values *wm = &dev_priv->display.wm.g4x;
3764 struct intel_crtc *crtc;
3765
3766 g4x_read_wm_values(dev_priv, wm);
3767
3768 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN;
3769
3770 for_each_intel_crtc(&dev_priv->drm, crtc) {
3771 struct intel_crtc_state *crtc_state =
3772 to_intel_crtc_state(crtc->base.state);
3773 struct g4x_wm_state *active = &crtc->wm.active.g4x;
3774 struct g4x_pipe_wm *raw;
3775 enum pipe pipe = crtc->pipe;
3776 enum plane_id plane_id;
3777 int level, max_level;
3778
3779 active->cxsr = wm->cxsr;
3780 active->hpll_en = wm->hpll_en;
3781 active->fbc_en = wm->fbc_en;
3782
3783 active->sr = wm->sr;
3784 active->hpll = wm->hpll;
3785
3786 for_each_plane_id_on_crtc(crtc, plane_id) {
3787 active->wm.plane[plane_id] =
3788 wm->pipe[pipe].plane[plane_id];
3789 }
3790
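/*
 * Deepest watermark level the hardware currently has enabled;
 * HPLL SR only counts when CxSR is also enabled.
 */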
3791 if (wm->cxsr && wm->hpll_en)
3792 max_level = G4X_WM_LEVEL_HPLL;
3793 else if (wm->cxsr)
3794 max_level = G4X_WM_LEVEL_SR;
3795 else
3796 max_level = G4X_WM_LEVEL_NORMAL;
3797
3798 level = G4X_WM_LEVEL_NORMAL;
3799 raw = &crtc_state->wm.g4x.raw[level];
3800 for_each_plane_id_on_crtc(crtc, plane_id)
3801 raw->plane[plane_id] = active->wm.plane[plane_id];
3802
3803 level = G4X_WM_LEVEL_SR;
3804 if (level > max_level)
3805 goto out;
3806
3807 raw = &crtc_state->wm.g4x.raw[level];
3808 raw->plane[PLANE_PRIMARY] = active->sr.plane;
3809 raw->plane[PLANE_CURSOR] = active->sr.cursor;
3810 raw->plane[PLANE_SPRITE0] = 0;
3811 raw->fbc = active->sr.fbc;
3812
3813 level = G4X_WM_LEVEL_HPLL;
3814 if (level > max_level)
3815 goto out;
3816
3817 raw = &crtc_state->wm.g4x.raw[level];
3818 raw->plane[PLANE_PRIMARY] = active->hpll.plane;
3819 raw->plane[PLANE_CURSOR] = active->hpll.cursor;
3820 raw->plane[PLANE_SPRITE0] = 0;
3821 raw->fbc = active->hpll.fbc;
3822
3823 level++;
3824 out:
3825 for_each_plane_id_on_crtc(crtc, plane_id)
3826 g4x_raw_plane_wm_set(crtc_state, level,
3827 plane_id, USHRT_MAX);
3828 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);
3829
3830 g4x_invalidate_wms(crtc, active, level);
3831
3832 crtc_state->wm.g4x.optimal = *active;
3833 crtc_state->wm.g4x.intermediate = *active;
3834
3835 drm_dbg_kms(&dev_priv->drm,
3836 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n",
3837 pipe_name(pipe),
3838 wm->pipe[pipe].plane[PLANE_PRIMARY],
3839 wm->pipe[pipe].plane[PLANE_CURSOR],
3840 wm->pipe[pipe].plane[PLANE_SPRITE0]);
3841 }
3842
3843 drm_dbg_kms(&dev_priv->drm,
3844 "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n",
3845 wm->sr.plane, wm->sr.cursor, wm->sr.fbc);
3846 drm_dbg_kms(&dev_priv->drm,
3847 "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n",
3848 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc);
3849 drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n",
3850 str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en),
3851 str_yes_no(wm->fbc_en));
3852 }
3853
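/*
 * Zero out the raw watermarks of planes that are not visible and
 * recompute/reprogram the optimal watermarks from the read-out state.
 */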
3854 static void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
3855 {
3856 struct intel_display *display = &dev_priv->display;
3857 struct intel_plane *plane;
3858 struct intel_crtc *crtc;
3859
3860 mutex_lock(&dev_priv->display.wm.wm_mutex);
3861
3862 for_each_intel_plane(&dev_priv->drm, plane) {
3863 struct intel_crtc *crtc =
3864 intel_crtc_for_pipe(display, plane->pipe);
3865 struct intel_crtc_state *crtc_state =
3866 to_intel_crtc_state(crtc->base.state);
3867 struct intel_plane_state *plane_state =
3868 to_intel_plane_state(plane->base.state);
3869 enum plane_id plane_id = plane->id;
3870 int level;
3871
3872 if (plane_state->uapi.visible)
3873 continue;
3874
3875 for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
3876 struct g4x_pipe_wm *raw =
3877 &crtc_state->wm.g4x.raw[level];
3878
3879 raw->plane[plane_id] = 0;
3880
3881 if (plane_id == PLANE_PRIMARY)
3882 raw->fbc = 0;
3883 }
3884 }
3885
3886 for_each_intel_crtc(&dev_priv->drm, crtc) {
3887 struct intel_crtc_state *crtc_state =
3888 to_intel_crtc_state(crtc->base.state);
3889 int ret;
3890
3891 ret = _g4x_compute_pipe_wm(crtc_state);
3892 drm_WARN_ON(&dev_priv->drm, ret);
3893
3894 crtc_state->wm.g4x.intermediate =
3895 crtc_state->wm.g4x.optimal;
3896 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
3897 }
3898
3899 g4x_program_watermarks(dev_priv);
3900
3901 mutex_unlock(&dev_priv->display.wm.wm_mutex);
3902 }
3903
3904 static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
3905 {
3906 g4x_wm_get_hw_state(i915);
3907 g4x_wm_sanitize(i915);
3908 }
3909
3910 static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
3911 {
3912 struct vlv_wm_values *wm = &dev_priv->display.wm.vlv;
3913 struct intel_crtc *crtc;
3914 u32 val;
3915
3916 vlv_read_wm_values(dev_priv, wm);
3917
3918 wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
3919 wm->level = VLV_WM_LEVEL_PM2;
3920
3921 if (IS_CHERRYVIEW(dev_priv)) {
3922 vlv_punit_get(dev_priv);
3923
3924 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
3925 if (val & DSP_MAXFIFO_PM5_ENABLE)
3926 wm->level = VLV_WM_LEVEL_PM5;
3927
3928 /*
3929 * If DDR DVFS is disabled in the BIOS, Punit
3930 * will never ack the request. So if that happens
3931 * assume we don't have to enable/disable DDR DVFS
3932 * dynamically. To test that just set the REQ_ACK
3933 * bit to poke the Punit, but don't change the
3934 * HIGH/LOW bits so that we don't actually change
3935 * the current state.
3936 */
3937 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3938 val |= FORCE_DDR_FREQ_REQ_ACK;
3939 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
3940
3941 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
3942 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
3943 drm_dbg_kms(&dev_priv->drm,
3944 "Punit not acking DDR DVFS request, "
3945 "assuming DDR DVFS is disabled\n");
3946 dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
3947 } else {
3948 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
3949 if ((val & FORCE_DDR_HIGH_FREQ) == 0)
3950 wm->level = VLV_WM_LEVEL_DDR_DVFS;
3951 }
3952
3953 vlv_punit_put(dev_priv);
3954 }
3955
3956 for_each_intel_crtc(&dev_priv->drm, crtc) {
3957 struct intel_crtc_state *crtc_state =
3958 to_intel_crtc_state(crtc->base.state);
3959 struct vlv_wm_state *active = &crtc->wm.active.vlv;
3960 const struct vlv_fifo_state *fifo_state =
3961 &crtc_state->wm.vlv.fifo_state;
3962 enum pipe pipe = crtc->pipe;
3963 enum plane_id plane_id;
3964 int level;
3965
3966 vlv_get_fifo_size(crtc_state);
3967
3968 active->num_levels = wm->level + 1;
3969 active->cxsr = wm->cxsr;
3970
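/*
 * Rebuild the raw per-plane watermarks from the hardware values
 * and the current FIFO split.
 */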
3971 for (level = 0; level < active->num_levels; level++) {
3972 struct g4x_pipe_wm *raw =
3973 &crtc_state->wm.vlv.raw[level];
3974
3975 active->sr[level].plane = wm->sr.plane;
3976 active->sr[level].cursor = wm->sr.cursor;
3977
3978 for_each_plane_id_on_crtc(crtc, plane_id) {
3979 active->wm[level].plane[plane_id] =
3980 wm->pipe[pipe].plane[plane_id];
3981
3982 raw->plane[plane_id] =
3983 vlv_invert_wm_value(active->wm[level].plane[plane_id],
3984 fifo_state->plane[plane_id]);
3985 }
3986 }
3987
3988 for_each_plane_id_on_crtc(crtc, plane_id)
3989 vlv_raw_plane_wm_set(crtc_state, level,
3990 plane_id, USHRT_MAX);
3991 vlv_invalidate_wms(crtc, active, level);
3992
3993 crtc_state->wm.vlv.optimal = *active;
3994 crtc_state->wm.vlv.intermediate = *active;
3995
3996 drm_dbg_kms(&dev_priv->drm,
3997 "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
3998 pipe_name(pipe),
3999 wm->pipe[pipe].plane[PLANE_PRIMARY],
4000 wm->pipe[pipe].plane[PLANE_CURSOR],
4001 wm->pipe[pipe].plane[PLANE_SPRITE0],
4002 wm->pipe[pipe].plane[PLANE_SPRITE1]);
4003 }
4004
4005 drm_dbg_kms(&dev_priv->drm,
4006 "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
4007 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
4008 }
4009
4010 static void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
4011 {
4012 struct intel_display *display = &dev_priv->display;
4013 struct intel_plane *plane;
4014 struct intel_crtc *crtc;
4015
4016 mutex_lock(&dev_priv->display.wm.wm_mutex);
4017
4018 for_each_intel_plane(&dev_priv->drm, plane) {
4019 struct intel_crtc *crtc =
4020 intel_crtc_for_pipe(display, plane->pipe);
4021 struct intel_crtc_state *crtc_state =
4022 to_intel_crtc_state(crtc->base.state);
4023 struct intel_plane_state *plane_state =
4024 to_intel_plane_state(plane->base.state);
4025 enum plane_id plane_id = plane->id;
4026 int level;
4027
4028 if (plane_state->uapi.visible)
4029 continue;
4030
4031 for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
4032 struct g4x_pipe_wm *raw =
4033 &crtc_state->wm.vlv.raw[level];
4034
4035 raw->plane[plane_id] = 0;
4036 }
4037 }
4038
4039 for_each_intel_crtc(&dev_priv->drm, crtc) {
4040 struct intel_crtc_state *crtc_state =
4041 to_intel_crtc_state(crtc->base.state);
4042 int ret;
4043
4044 ret = _vlv_compute_pipe_wm(crtc_state);
4045 drm_WARN_ON(&dev_priv->drm, ret);
4046
4047 crtc_state->wm.vlv.intermediate =
4048 crtc_state->wm.vlv.optimal;
4049 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
4050 }
4051
4052 vlv_program_watermarks(dev_priv);
4053
4054 mutex_unlock(&dev_priv->display.wm.wm_mutex);
4055 }
4056
4057 static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915)
4058 {
4059 vlv_wm_get_hw_state(i915);
4060 vlv_wm_sanitize(i915);
4061 }
4062
4063 /*
4064 * FIXME should probably kill this and improve
4065 * the real watermark readout/sanitation instead
4066 */
4067 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
4068 {
4069 intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0);
4070 intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0);
4071 intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0);
4072
4073 /*
4074 * Don't touch WM_LP_SPRITE_ENABLE here.
4075 * Doing so could cause underruns.
4076 */
4077 }
4078
4079 static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
4080 {
4081 struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
4082 struct intel_crtc *crtc;
4083
4084 ilk_init_lp_watermarks(dev_priv);
4085
4086 for_each_intel_crtc(&dev_priv->drm, crtc)
4087 ilk_pipe_wm_get_hw_state(crtc);
4088
4089 hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK);
4090 hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK);
4091 hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK);
4092
4093 hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK);
4094 if (DISPLAY_VER(dev_priv) >= 7) {
4095 hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB);
4096 hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB);
4097 }
4098
4099 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4100 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) &
4101 WM_MISC_DATA_PARTITION_5_6) ?
4102 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4103 else if (IS_IVYBRIDGE(dev_priv))
4104 hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) &
4105 DISP_DATA_PARTITION_5_6) ?
4106 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4107
4108 hw->enable_fbc_wm =
4109 !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4110 }
4111
4112 static const struct intel_wm_funcs ilk_wm_funcs = {
4113 .compute_watermarks = ilk_compute_watermarks,
4114 .initial_watermarks = ilk_initial_watermarks,
4115 .optimize_watermarks = ilk_optimize_watermarks,
4116 .get_hw_state = ilk_wm_get_hw_state,
4117 };
4118
4119 static const struct intel_wm_funcs vlv_wm_funcs = {
4120 .compute_watermarks = vlv_compute_watermarks,
4121 .initial_watermarks = vlv_initial_watermarks,
4122 .optimize_watermarks = vlv_optimize_watermarks,
4123 .atomic_update_watermarks = vlv_atomic_update_fifo,
4124 .get_hw_state = vlv_wm_get_hw_state_and_sanitize,
4125 };
4126
4127 static const struct intel_wm_funcs g4x_wm_funcs = {
4128 .compute_watermarks = g4x_compute_watermarks,
4129 .initial_watermarks = g4x_initial_watermarks,
4130 .optimize_watermarks = g4x_optimize_watermarks,
4131 .get_hw_state = g4x_wm_get_hw_state_and_sanitize,
4132 };
4133
4134 static const struct intel_wm_funcs pnv_wm_funcs = {
4135 .compute_watermarks = i9xx_compute_watermarks,
4136 .update_wm = pnv_update_wm,
4137 };
4138
4139 static const struct intel_wm_funcs i965_wm_funcs = {
4140 .compute_watermarks = i9xx_compute_watermarks,
4141 .update_wm = i965_update_wm,
4142 };
4143
4144 static const struct intel_wm_funcs i9xx_wm_funcs = {
4145 .compute_watermarks = i9xx_compute_watermarks,
4146 .update_wm = i9xx_update_wm,
4147 };
4148
4149 static const struct intel_wm_funcs i845_wm_funcs = {
4150 .compute_watermarks = i9xx_compute_watermarks,
4151 .update_wm = i845_update_wm,
4152 };
4153
4154 static const struct intel_wm_funcs nop_funcs = {
4155 };
4156
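/*
 * Select the per-platform watermark vfuncs: PCH split platforms use the
 * atomic ILK paths, VLV/CHV and G4X have their own implementations, and
 * older platforms fall back to the legacy update_wm() hooks.
 */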
4157 void i9xx_wm_init(struct drm_i915_private *dev_priv)
4158 {
4159 /* For FIFO watermark updates */
4160 if (HAS_PCH_SPLIT(dev_priv)) {
4161 ilk_setup_wm_latency(dev_priv);
4162 dev_priv->display.funcs.wm = &ilk_wm_funcs;
4163 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4164 vlv_setup_wm_latency(dev_priv);
4165 dev_priv->display.funcs.wm = &vlv_wm_funcs;
4166 } else if (IS_G4X(dev_priv)) {
4167 g4x_setup_wm_latency(dev_priv);
4168 dev_priv->display.funcs.wm = &g4x_wm_funcs;
4169 } else if (IS_PINEVIEW(dev_priv)) {
4170 if (!pnv_get_cxsr_latency(dev_priv)) {
4171 drm_info(&dev_priv->drm, "Unknown FSB/MEM, disabling CxSR\n");
4172 /* Disable CxSR and never update its watermark again */
4173 intel_set_memory_cxsr(dev_priv, false);
4174 dev_priv->display.funcs.wm = &nop_funcs;
4175 } else {
4176 dev_priv->display.funcs.wm = &pnv_wm_funcs;
4177 }
4178 } else if (DISPLAY_VER(dev_priv) == 4) {
4179 dev_priv->display.funcs.wm = &i965_wm_funcs;
4180 } else if (DISPLAY_VER(dev_priv) == 3) {
4181 dev_priv->display.funcs.wm = &i9xx_wm_funcs;
4182 } else if (DISPLAY_VER(dev_priv) == 2) {
4183 if (INTEL_NUM_PIPES(dev_priv) == 1)
4184 dev_priv->display.funcs.wm = &i845_wm_funcs;
4185 else
4186 dev_priv->display.funcs.wm = &i9xx_wm_funcs;
4187 } else {
4188 drm_err(&dev_priv->drm,
4189 "unexpected fall-through in %s\n", __func__);
4190 dev_priv->display.funcs.wm = &nop_funcs;
4191 }
4192 }
4193