// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/debugfs.h>

#include "i915_drv.h"
#include "i9xx_wm.h"
#include "intel_display_types.h"
#include "intel_wm.h"
#include "skl_watermark.h"

/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 * @i915: i915 device
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->update_wm)
		i915->display.funcs.wm->update_wm(i915);
}
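
/*
 * Purely illustrative example of the two calculations described above,
 * using made-up numbers rather than any real platform's latencies:
 *
 *   dotclock = 100 MHz, 4 bytes per pixel, latency = 10 us
 *   normal: watermark = 100,000,000 * 4 * 0.000010 = 4000 bytes
 *
 *   htotal = 2000  =>  line time = 2000 / 100,000,000 = 20 us
 *   SR:     watermark = (trunc(10/20) + 1) * 1920 * 4 = 7680 bytes
 *           (surface width = hdisplay = 1920 for the primary plane)
 *
 * The per-platform implementations refine such byte counts in terms of
 * FIFO entries and apply the rounding and clock-crossing margin
 * mentioned above.
 */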

int intel_wm_compute(struct intel_atomic_state *state,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);

	if (!display->funcs.wm->compute_watermarks)
		return 0;

	return display->funcs.wm->compute_watermarks(state, crtc);
}

bool intel_initial_watermarks(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->initial_watermarks) {
		i915->display.funcs.wm->initial_watermarks(state, crtc);
		return true;
	}

	return false;
}

void intel_atomic_update_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->atomic_update_watermarks)
		i915->display.funcs.wm->atomic_update_watermarks(state, crtc);
}

void intel_optimize_watermarks(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->optimize_watermarks)
		i915->display.funcs.wm->optimize_watermarks(state, crtc);
}

int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (i915->display.funcs.wm->compute_global_watermarks)
		return i915->display.funcs.wm->compute_global_watermarks(state);

	return 0;
}

void intel_wm_get_hw_state(struct drm_i915_private *i915)
{
	if (i915->display.funcs.wm->get_hw_state)
		return i915->display.funcs.wm->get_hw_state(i915);
}

bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
			    const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->hw.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->hw.fb != NULL;
	else
		return plane_state->uapi.visible;
}

void intel_print_wm_latency(struct drm_i915_private *dev_priv,
			    const char *name, const u16 wm[])
{
	int level;

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "%s WM%d latency not provided\n",
				    name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (DISPLAY_VER(dev_priv) >= 9)
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		drm_dbg_kms(&dev_priv->drm,
			    "%s WM%d latency %u (%u.%u usec)\n", name, level,
			    wm[level], latency / 10, latency % 10);
	}
}
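
/*
 * Worked example of the scaling above (illustrative raw values only): on
 * gen9+ a raw latency of 5 is 5 us and is logged as "5.0 usec"; before
 * gen9 a WM1+ raw value of 5 is in 0.5 us units and is logged as
 * "2.5 usec".
 */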

void intel_wm_init(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		skl_wm_init(i915);
	else
		i9xx_wm_init(i915);
}

static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	int level;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(&dev_priv->drm);
}

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 new[8] = {};
	int level;
	int ret;
	char tmp[32];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != dev_priv->display.wm.num_levels)
		return -EINVAL;

	drm_modeset_lock_all(&dev_priv->drm);

	for (level = 0; level < dev_priv->display.wm.num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(&dev_priv->drm);

	return len;
}

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->display.wm.skl_latency;
	else
		latencies = dev_priv->display.wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
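
/*
 * The debugfs files registered below expose the latency tables used above.
 * Illustrative usage from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug, the device is DRM minor 0 and it has eight watermark
 * levels:
 *
 *   cat /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *   echo "2 4 5 7 9 11 13 15" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * wm_latency_write() rejects writes that do not supply exactly
 * display.wm.num_levels values with -EINVAL.
 */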

void intel_wm_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_pri_wm_latency_fops);

	debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_spr_wm_latency_fops);

	debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
			    i915, &i915_cur_wm_latency_fops);

	skl_watermark_debugfs_register(i915);
}