// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/int_log.h>
#include <linux/math.h>

#include <drm/drm_fixed.h>
#include <drm/drm_print.h>

#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
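/*
 * Note: link bpp values with an _x16 suffix are stored in .4 binary fixed
 * point format (see drm_fixed.h), i.e. as bpp * 16; for instance 18.5 bpp is
 * stored as 296. A force_bpp_x16 value of 0 means no link bpp is forced.
 */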
static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
				   const struct intel_crtc *crtc)
{
	struct intel_digital_connector_state *conn_state;
	struct intel_connector *connector;
	int force_bpp_x16 = INT_MAX;
	int i;

	for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
		if (conn_state->base.crtc != &crtc->base)
			continue;

		if (!connector->link.force_bpp_x16)
			continue;

		force_bpp_x16 = min(force_bpp_x16, connector->link.force_bpp_x16);
	}

	return force_bpp_x16 < INT_MAX ? force_bpp_x16 : 0;
}

/**
 * intel_link_bw_init_limits - initialize BW limits
 * @state: atomic state
 * @limits: link BW limits
 *
 * Initialize @limits.
 */
void intel_link_bw_init_limits(struct intel_atomic_state *state,
			       struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(state);
	enum pipe pipe;

	limits->force_fec_pipes = 0;
	limits->bpp_limit_reached_pipes = 0;
	for_each_pipe(display, pipe) {
		struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
		const struct intel_crtc_state *crtc_state =
			intel_atomic_get_new_crtc_state(state, crtc);
		int forced_bpp_x16 = get_forced_link_bpp_x16(state, crtc);

		if (state->base.duplicated && crtc_state) {
			limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
			if (crtc_state->fec_enable)
				limits->force_fec_pipes |= BIT(pipe);
		} else {
			limits->max_bpp_x16[pipe] = INT_MAX;
		}

		if (forced_bpp_x16)
			limits->max_bpp_x16[pipe] = min(limits->max_bpp_x16[pipe], forced_bpp_x16);
	}
}

/**
 * __intel_link_bw_reduce_bpp - reduce maximum link bpp for a selected pipe
 * @state: atomic state
 * @limits: link BW limits
 * @pipe_mask: mask of pipes to select from
 * @reason: explanation of why bpp reduction is needed
 * @reduce_forced_bpp: allow reducing bpps below their forced link bpp
 *
 * Select the pipe from @pipe_mask with the biggest link bpp value and set its
 * link bpp limit in @limits below this value. Modeset the selected pipe, so
 * that its state will get recomputed.
 *
 * This function can be called to resolve a link's BW overallocation by
 * reducing the link bpp of one pipe on the link and hence reducing the total
 * link BW.
 *
 * Returns:
 * - 0 in case of success
 * - %-ENOSPC if no pipe can further reduce its link bpp
 * - Other negative error, if modesetting the selected pipe failed
 */
static int __intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits,
				      u8 pipe_mask,
				      const char *reason,
				      bool reduce_forced_bpp)
{
	struct intel_display *display = to_intel_display(state);
	enum pipe max_bpp_pipe = INVALID_PIPE;
	struct intel_crtc *crtc;
	int max_bpp_x16 = 0;

	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		struct intel_crtc_state *crtc_state;
		int link_bpp_x16;

		if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe))
			continue;

		crtc_state = intel_atomic_get_crtc_state(&state->base,
							 crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->dsc.compression_enable)
			link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
		else
			/*
			 * TODO: for YUV420 the actual link bpp is only half
			 * of the pipe bpp value. The MST encoder's BW allocation
			 * is based on the pipe bpp value, set the actual link bpp
			 * limit here once the MST BW allocation is fixed.
			 */
			link_bpp_x16 = fxp_q4_from_int(crtc_state->pipe_bpp);

		if (!reduce_forced_bpp &&
		    link_bpp_x16 <= get_forced_link_bpp_x16(state, crtc))
			continue;

		if (link_bpp_x16 > max_bpp_x16) {
			max_bpp_x16 = link_bpp_x16;
			max_bpp_pipe = crtc->pipe;
		}
	}

	if (max_bpp_pipe == INVALID_PIPE)
		return -ENOSPC;

	limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1;

	return intel_modeset_pipes_in_mask_early(state, reason,
						 BIT(max_bpp_pipe));
}

int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
			     struct intel_link_bw_limits *limits,
			     u8 pipe_mask,
			     const char *reason)
{
	int ret;

	/* Try to keep any forced link BPP. */
	ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, false);
	if (ret == -ENOSPC)
		ret = __intel_link_bw_reduce_bpp(state, limits, pipe_mask, reason, true);

	return ret;
}
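/*
 * Worked example (values are illustrative): if two uncompressed pipes share a
 * link with 30 and 24 pipe bpp, the 30 bpp pipe has the bigger link bpp, so
 * its limit is set to just below its current value (30 * 16 - 1 = 479 in .4
 * fixed point) and that pipe is marked for a modeset, getting its state
 * recomputed against the new limit. intel_link_bw_reduce_bpp() first tries to
 * keep any bpp forced via debugfs and only reduces below forced values if
 * that is not enough.
 */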
/**
 * intel_link_bw_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));

	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}

/**
 * intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
 * @state: atomic state
 * @old_limits: previous link BW limits
 * @new_limits: new link BW limits to update
 * @pipe: pipe
 *
 * Set the link bpp limit for @pipe in @new_limits to its value in
 * @old_limits and mark this limit as the minimum. This function must be
 * called after a pipe's compute config function failed, with @old_limits
 * containing the bpp limit with which compute config previously passed.
 *
 * The function will fail if setting a minimum is not possible, either
 * because the old and new limits match (and so would lead to a pipe compute
 * config failure) or the limit is already at the minimum.
 *
 * Returns %true in case of success.
 */
bool
intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
				     const struct intel_link_bw_limits *old_limits,
				     struct intel_link_bw_limits *new_limits,
				     enum pipe pipe)
{
	struct intel_display *display = to_intel_display(state);

	if (pipe == INVALID_PIPE)
		return false;

	if (new_limits->max_bpp_x16[pipe] ==
	    old_limits->max_bpp_x16[pipe])
		return false;

	if (drm_WARN_ON(display->drm,
			new_limits->bpp_limit_reached_pipes & BIT(pipe)))
		return false;

	new_limits->max_bpp_x16[pipe] =
		old_limits->max_bpp_x16[pipe];
	new_limits->bpp_limit_reached_pipes |= BIT(pipe);

	return true;
}

static int check_all_link_config(struct intel_atomic_state *state,
				 struct intel_link_bw_limits *limits)
{
	/* TODO: Check additional shared display link configurations */
	int ret;

	ret = intel_dp_mst_atomic_check_link(state, limits);
	if (ret)
		return ret;

	ret = intel_dp_tunnel_atomic_check_link(state, limits);
	if (ret)
		return ret;

	ret = intel_fdi_atomic_check_link(state, limits);
	if (ret)
		return ret;

	return 0;
}

static bool
assert_link_limit_change_valid(struct intel_display *display,
			       const struct intel_link_bw_limits *old_limits,
			       const struct intel_link_bw_limits *new_limits)
{
	bool bpps_changed = false;
	enum pipe pipe;

	/* FEC can't be forced off after it was forced on. */
	if (drm_WARN_ON(display->drm,
			(old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
			old_limits->force_fec_pipes))
		return false;

	for_each_pipe(display, pipe) {
		/* The bpp limit can only decrease. */
		if (drm_WARN_ON(display->drm,
				new_limits->max_bpp_x16[pipe] >
				old_limits->max_bpp_x16[pipe]))
			return false;

		if (new_limits->max_bpp_x16[pipe] <
		    old_limits->max_bpp_x16[pipe])
			bpps_changed = true;
	}

	/* At least one limit must change. */
	if (drm_WARN_ON(display->drm,
			!bpps_changed &&
			new_limits->force_fec_pipes ==
			old_limits->force_fec_pipes))
		return false;

	return true;
}

/**
 * intel_link_bw_atomic_check - check display link states and set a fallback config if needed
 * @state: atomic state
 * @new_limits: new link BW limits
 *
 * Check the configuration of all shared display links in @state and set new BW
 * limits in @new_limits if there is a BW limitation.
 *
 * Returns:
 * - 0 if the configuration is valid
 * - %-EAGAIN, if the configuration is invalid and @new_limits got updated
 *   with fallback values with which the configuration of all CRTCs
 *   in @state must be recomputed
 * - Other negative error, if the configuration is invalid without a
 *   fallback possibility, or the check failed for another reason
 */
int intel_link_bw_atomic_check(struct intel_atomic_state *state,
			       struct intel_link_bw_limits *new_limits)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_link_bw_limits old_limits = *new_limits;
	int ret;

	ret = check_all_link_config(state, new_limits);
	if (ret != -EAGAIN)
		return ret;

	if (!assert_link_limit_change_valid(display, &old_limits, new_limits))
		return -EINVAL;

	return -EAGAIN;
}
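/*
 * Sketch of the intended calling pattern, given a struct intel_atomic_state
 * *state under preparation (illustrative only; the actual atomic check code
 * differs in its details):
 *
 *	struct intel_link_bw_limits limits;
 *	int ret;
 *
 *	intel_link_bw_init_limits(state, &limits);
 *
 *	for (;;) {
 *		... (re)compute the CRTC states in state against limits ...
 *
 *		ret = intel_link_bw_atomic_check(state, &limits);
 *		if (ret != -EAGAIN)
 *			break;
 *	}
 */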
static int force_link_bpp_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;

	seq_printf(m, FXP_Q4_FMT "\n", FXP_Q4_ARGS(connector->link.force_bpp_x16));

	return 0;
}

static int str_to_fxp_q4_nonneg_int(const char *str, int *val_x16)
{
	unsigned int val;
	int err;

	err = kstrtouint(str, 10, &val);
	if (err)
		return err;

	if (val > INT_MAX >> 4)
		return -ERANGE;

	*val_x16 = fxp_q4_from_int(val);

	return 0;
}

/* modifies str */
static int str_to_fxp_q4_nonneg(char *str, int *val_x16)
{
	const char *int_str;
	char *frac_str;
	int frac_digits;
	int frac_val;
	int err;

	int_str = strim(str);
	frac_str = strchr(int_str, '.');

	if (frac_str)
		*frac_str++ = '\0';

	err = str_to_fxp_q4_nonneg_int(int_str, val_x16);
	if (err)
		return err;

	if (!frac_str)
		return 0;

	/* reject a leading +/- sign mark, i.e. a negative or explicitly signed fraction */
	if (!isdigit(*frac_str))
		return -EINVAL;

	err = str_to_fxp_q4_nonneg_int(frac_str, &frac_val);
	if (err)
		return err;

	frac_digits = strlen(frac_str);
	if (frac_digits > intlog10(INT_MAX) >> 24 ||
	    frac_val > INT_MAX - int_pow(10, frac_digits) / 2)
		return -ERANGE;

	/* convert the decimal fraction (already scaled by 16 above) to the Q4 fractional part */
	frac_val = DIV_ROUND_CLOSEST(frac_val, (int)int_pow(10, frac_digits));

	if (*val_x16 > INT_MAX - frac_val)
		return -ERANGE;

	*val_x16 += frac_val;

	return 0;
}

static int user_str_to_fxp_q4_nonneg(const char __user *ubuf, size_t len, int *val_x16)
{
	char *kbuf;
	int err;

	kbuf = memdup_user_nul(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	err = str_to_fxp_q4_nonneg(kbuf, val_x16);

	kfree(kbuf);

	return err;
}
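/*
 * Example: "18.5" parses to fxp_q4_from_int(18) + DIV_ROUND_CLOSEST(5 * 16, 10)
 * = 288 + 8 = 296, i.e. 18.5 in .4 fixed point. Fractional digits beyond the
 * 1/16 resolution are rounded to the closest representable value.
 */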
static bool connector_supports_dsc(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);

	switch (connector->base.connector_type) {
	case DRM_MODE_CONNECTOR_eDP:
		return intel_dp_has_dsc(connector);
	case DRM_MODE_CONNECTOR_DisplayPort:
		if (connector->mst.dp)
			return HAS_DSC_MST(display);

		return HAS_DSC(display);
	default:
		return false;
	}
}

static ssize_t
force_link_bpp_write(struct file *file, const char __user *ubuf, size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_connector *connector = m->private;
	struct intel_display *display = to_intel_display(connector);
	int min_bpp;
	int bpp_x16;
	int err;

	err = user_str_to_fxp_q4_nonneg(ubuf, len, &bpp_x16);
	if (err)
		return err;

	/* TODO: Make the non-DSC min_bpp value connector specific. */
	if (connector_supports_dsc(connector))
		min_bpp = intel_dp_dsc_min_src_compressed_bpp();
	else
		min_bpp = intel_display_min_pipe_bpp();

	if (bpp_x16 &&
	    (bpp_x16 < fxp_q4_from_int(min_bpp) ||
	     bpp_x16 > fxp_q4_from_int(intel_display_max_pipe_bpp(display))))
		return -EINVAL;

	err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
	if (err)
		return err;

	connector->link.force_bpp_x16 = bpp_x16;

	drm_modeset_unlock(&display->drm->mode_config.connection_mutex);

	*offp += len;

	return len;
}
DEFINE_SHOW_STORE_ATTRIBUTE(force_link_bpp);

void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
{
	struct intel_display *display = to_intel_display(connector);
	struct dentry *root = connector->base.debugfs_entry;

	switch (connector->base.connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
	case DRM_MODE_CONNECTOR_HDMIA:
		break;
	case DRM_MODE_CONNECTOR_VGA:
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_LVDS:
	case DRM_MODE_CONNECTOR_DVID:
		if (HAS_FDI(display))
			break;

		return;
	default:
		return;
	}

	debugfs_create_file("intel_force_link_bpp", 0644, root,
			    connector, &force_link_bpp_fops);
}
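/*
 * Usage example for the debugfs file created above (the path assumes the
 * typical DRM debugfs layout; adjust the card index and connector name):
 *
 *	echo 18.5 > /sys/kernel/debug/dri/0/DP-1/intel_force_link_bpp
 *
 * forces an 18.5 link bpp on the connector, while writing 0 removes the
 * forcing. The value is applied via intel_link_bw_init_limits() the next time
 * the pipe's configuration is computed.
 */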