// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/debugfs.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @i915: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 * The normal calculation is:
 *   watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time)+1) * surface width *
 *               bytes per pixel
 * where
 *   line time = htotal / dotclock
 *   surface width = hdisplay for normal plane and 64 for cursor
 * and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *i915)
{
        if (i915->display.funcs.wm->update_wm)
                i915->display.funcs.wm->update_wm(i915);
}

int intel_wm_compute(struct intel_atomic_state *state,
                     struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);

        if (!display->funcs.wm->compute_watermarks)
                return 0;

        return display->funcs.wm->compute_watermarks(state, crtc);
}

bool intel_initial_watermarks(struct intel_atomic_state *state,
                              struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        if (i915->display.funcs.wm->initial_watermarks) {
                i915->display.funcs.wm->initial_watermarks(state, crtc);
                return true;
        }

        return false;
}

void intel_atomic_update_watermarks(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        if (i915->display.funcs.wm->atomic_update_watermarks)
                i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

void intel_optimize_watermarks(struct intel_atomic_state *state,
                               struct intel_crtc *crtc)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        if (i915->display.funcs.wm->optimize_watermarks)
                i915->display.funcs.wm->optimize_watermarks(state, crtc);
}

int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);

        if (i915->display.funcs.wm->compute_global_watermarks)
                return i915->display.funcs.wm->compute_global_watermarks(state);

        return 0;
}

void intel_wm_get_hw_state(struct drm_i915_private *i915)
{
        if (i915->display.funcs.wm->get_hw_state)
                return i915->display.funcs.wm->get_hw_state(i915);
}

void intel_wm_sanitize(struct drm_i915_private *i915)
{
        if (i915->display.funcs.wm->sanitize)
                return i915->display.funcs.wm->sanitize(i915);
}

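/*
 * Worked example of the calculations described above intel_update_watermarks()
 * (purely illustrative numbers; the 64-byte FIFO entry size is an assumption,
 * not a statement about any particular platform):
 *
 * Normal: with a 148.5 MHz dotclock, 4 bytes per pixel and 12 usec latency,
 * the FIFO must absorb 148,500,000 * 4 * 0.000012 ~= 7128 bytes, i.e.
 * 7128 / 64 ~= 111.4 entries, which rounds up to 112; adding the extra 2
 * entries for clock crossings gives a programmed value of 114.
 *
 * SR: with htotal = 2200, line time = 2200 / 148,500,000 ~= 14.8 usec, so
 * trunc(12 / 14.8) + 1 = 1 line, and for a 1920 pixel wide plane the
 * watermark works out to 1 * 1920 * 4 = 7680 bytes.
 */
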
bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
                            const struct intel_plane_state *plane_state)
{
        struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

        /* FIXME check the 'enable' instead */
        if (!crtc_state->hw.active)
                return false;

        /*
         * Treat cursor with fb as always visible since cursor updates
         * can happen faster than the vrefresh rate, and the current
         * watermark code doesn't handle that correctly. Cursor updates
         * which set/clear the fb or change the cursor size are going
         * to get throttled by intel_legacy_cursor_update() to work
         * around this problem with the watermark code.
         */
        if (plane->id == PLANE_CURSOR)
                return plane_state->hw.fb != NULL;
        else
                return plane_state->uapi.visible;
}

void intel_print_wm_latency(struct drm_i915_private *dev_priv,
                            const char *name, const u16 wm[])
{
        int level;

        for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
                unsigned int latency = wm[level];

                if (latency == 0) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "%s WM%d latency not provided\n",
                                    name, level);
                        continue;
                }

                /*
                 * - latencies are in us on gen9
                 * - before then, WM1+ latency values are in 0.5us units
                 */
                if (DISPLAY_VER(dev_priv) >= 9)
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                drm_dbg_kms(&dev_priv->drm,
                            "%s WM%d latency %u (%u.%u usec)\n", name, level,
                            wm[level], latency / 10, latency % 10);
        }
}

void intel_wm_init(struct drm_i915_private *i915)
{
        if (DISPLAY_VER(i915) >= 9)
                skl_wm_init(i915);
        else
                i9xx_wm_init(i915);
}

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        int level;

        drm_modeset_lock_all(&dev_priv->drm);

        for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv/g4x
                 */
                if (DISPLAY_VER(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(&dev_priv->drm);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        const u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.pri_latency;

        wm_latency_show(m, latencies);

        return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        const u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.spr_latency;

        wm_latency_show(m, latencies);

        return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        const u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.cur_latency;

        wm_latency_show(m, latencies);

        return 0;
}

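/*
 * Decoding example for the scaling done in intel_print_wm_latency() and
 * wm_latency_show() above (values are illustrative): the raw latency is kept
 * in whatever units the platform provides, so a raw WM1 value of 4 stored in
 * 0.5 usec units is scaled to 4 * 5 = 20 tenths of a usec and shown as
 * "2.0 usec", while on platforms where latencies are already in usec (gen9+,
 * and also vlv/chv/g4x in the debugfs path) the same raw 4 is scaled to
 * 4 * 10 = 40 tenths and shown as "4.0 usec".
 */
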
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *dev_priv = inode->i_private;

        if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
                return -ENODEV;

        return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *dev_priv = inode->i_private;

        if (HAS_GMCH(dev_priv))
                return -ENODEV;

        return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *dev_priv = inode->i_private;

        if (HAS_GMCH(dev_priv))
                return -ENODEV;

        return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, u16 wm[8])
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        u16 new[8] = {};
        int level;
        int ret;
        char tmp[32];

        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                     &new[0], &new[1], &new[2], &new[3],
                     &new[4], &new[5], &new[6], &new[7]);
        if (ret != dev_priv->display.wm.num_levels)
                return -EINVAL;

        drm_modeset_lock_all(&dev_priv->drm);

        for (level = 0; level < dev_priv->display.wm.num_levels; level++)
                wm[level] = new[level];

        drm_modeset_unlock_all(&dev_priv->drm);

        return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.pri_latency;

        return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.spr_latency;

        return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
                                    size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        u16 *latencies;

        if (DISPLAY_VER(dev_priv) >= 9)
                latencies = dev_priv->display.wm.skl_latency;
        else
                latencies = dev_priv->display.wm.cur_latency;

        return wm_latency_write(file, ubuf, len, offp, latencies);
}

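/*
 * Note on the write path above: wm_latency_write() parses up to eight
 * space-separated u16 values and requires the count to match
 * display.wm.num_levels, otherwise it returns -EINVAL. E.g., in a
 * hypothetical configuration with five enabled levels, "2 4 6 8 10" is
 * accepted while "2 4 6" is rejected. The new values are simply stored
 * under the modeset locks here; they are picked up the next time
 * watermarks are computed.
 */
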
static const struct file_operations i915_pri_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = pri_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = spr_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = cur_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = cur_wm_latency_write
};

void intel_wm_debugfs_register(struct drm_i915_private *i915)
{
        struct drm_minor *minor = i915->drm.primary;

        debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
                            i915, &i915_pri_wm_latency_fops);

        debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
                            i915, &i915_spr_wm_latency_fops);

        debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
                            i915, &i915_cur_wm_latency_fops);

        skl_watermark_debugfs_register(i915);
}
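
/*
 * Example debugfs interaction (illustrative; the DRM minor index and the
 * latency values are hypothetical). Reading dumps one line per enabled
 * watermark level in the "WM<level> <raw> (<usec> usec)" format produced
 * by wm_latency_show():
 *
 *   # cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   # echo "2 4 6 8 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */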