// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_fixed.h>
#include <drm/drm_print.h>

#include "i915_reg.h"
#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_link_bw.h"

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct intel_display *display,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(display)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(display,
					  TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI TX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, true);
}

void assert_fdi_tx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_tx(display, pipe, false);
}

static void assert_fdi_rx(struct intel_display *display,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX state assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, true);
}

void assert_fdi_rx_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx(display, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (display->platform.ironlake)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(display))
		return;

	cur_state = intel_de_read(display, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, !cur_state,
				 "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct intel_display *display,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(display, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	INTEL_DISPLAY_STATE_WARN(display, cur_state != state,
				 "FDI RX PLL assertion failure (expected %s, current %s)\n",
				 str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct intel_display *display, enum pipe pipe)
{
	assert_fdi_rx_pll(display, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);

	display->funcs.fdi->fdi_link_train(crtc, crtc_state);
}

/**
 * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
 * @state: intel atomic state
 *
 * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
 * known to affect the available FDI BW for the former CRTC. In practice this
 * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
 * CRTC C) and CRTC C is getting disabled.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;

	if (!display->platform.ivybridge || INTEL_NUM_PIPES(display) != 3)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_C);
	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	if (!new_crtc_state)
		return 0;

	if (!intel_crtc_needs_modeset(new_crtc_state))
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	crtc = intel_crtc_for_pipe(display, PIPE_B);
	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
	if (IS_ERR(new_crtc_state))
		return PTR_ERR(new_crtc_state);

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
	if (!old_crtc_state->fdi_lanes)
		return 0;

	return intel_modeset_pipes_in_mask_early(state,
						 "FDI link BW decrease on pipe C",
						 BIT(PIPE_B));
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct intel_display *display, enum pipe pipe,
			       struct intel_crtc_state *pipe_config,
			       enum pipe *pipe_to_reduce)
{
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	*pipe_to_reduce = pipe;

	drm_dbg_kms(display->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(display->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (display->platform.haswell || display->platform.broadwell) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(display) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(display, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(display->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(display->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(display, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(display->drm,
				    "fdi link B uses too many lanes to enable link C\n");

			*pipe_to_reduce = PIPE_B;

			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct intel_display *display)
{
	if (display->platform.ironlake) {
		u32 fdi_pll_clk;

		fdi_pll_clk = intel_de_read(display, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		display->fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (display->platform.sandybridge || display->platform.ivybridge) {
		display->fdi.pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(display->drm, "FDI PLL freq=%d\n", display->fdi.pll_freq);
}

int intel_fdi_link_freq(struct intel_display *display,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(display))
		return pipe_config->port_clock; /* SPLL */
	else
		return display->fdi.pll_freq;
}

/**
 * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
 * @crtc_state: the crtc state
 *
 * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
 * call this function during state computation in the simple case where the
 * link bpp will always match the pipe bpp. This is the case for all non-DP
 * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
 * of DSC compression.
 *
 * Returns %true in case of success, %false if pipe bpp would need to be
 * reduced below its valid range.
 */
bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
{
	int pipe_bpp = min(crtc_state->pipe_bpp,
			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));

	/* Round down so that each of the 3 color components has an even bpc */
	pipe_bpp = rounddown(pipe_bpp, 2 * 3);

	/* 6 bpc is the minimum supported pipe bpp */
	if (pipe_bpp < 6 * 3)
		return false;

	crtc_state->pipe_bpp = pipe_bpp;

	return true;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct intel_display *display = to_intel_display(crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(display, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
			       lane, fdi_dotclock,
			       link_bw,
			       intel_dp_bw_fec_overhead(false),
			       &pipe_config->fdi_m_n);

	return 0;
}

static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
				     struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     struct intel_link_bw_limits *limits)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe_to_reduce;
	int ret;

	ret = ilk_check_fdi_lanes(display, crtc->pipe, pipe_config,
				  &pipe_to_reduce);
	if (ret != -EINVAL)
		return ret;

	ret = intel_link_bw_reduce_bpp(state, limits,
				       BIT(pipe_to_reduce),
				       "FDI link BW");

	return ret ? : -EAGAIN;
}

/**
 * intel_fdi_atomic_check_link - check all modeset FDI link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset FDI outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 * - 0 if the configuration is valid
 * - %-EAGAIN, if the configuration is invalid and @limits got updated
 *   with fallback values with which the configuration of all CRTCs
 *   in @state must be recomputed
 * - Other negative error, if the configuration is invalid without a
 *   fallback possibility, or the check failed for another reason
 */
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
				struct intel_link_bw_limits *limits)
{
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		int ret;

		if (!crtc_state->has_pch_encoder ||
		    !intel_crtc_needs_modeset(crtc_state) ||
		    !crtc_state->hw.enable)
			continue;

		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}

static void cpt_set_fdi_bc_bifurcation(struct intel_display *display, bool enable)
{
	u32 temp;

	temp = intel_de_read(display, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(display->drm,
		    intel_de_read(display, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(display->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(display, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(display, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(display, false);
		else
			cpt_set_fdi_bc_bifurcation(display, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(display, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (display->platform.ivybridge) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(display, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (display->platform.ivybridge)
		intel_de_rmw(display, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(display, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);
	intel_de_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(display, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(display->drm, "FDI train 1 done.\n");
			intel_de_write(display, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_rmw(display, FDI_RX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(display, reg);
		drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(display, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(display->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(display, reg, temp | FDI_TX_ENABLE);

	intel_de_write(display, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(display, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (display->platform.sandybridge) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(display, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
		intel_de_posting_read(display, FDI_TX_CTL(pipe));
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(display->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(display->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(display, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(display, PIPE_DATA_M1(display, pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(150);

	drm_dbg_kms(display->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(display, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(display, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(display, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j / 2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_TX_ENABLE);

		intel_de_write(display, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(display, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(display, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(display, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(display->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		intel_de_rmw(display, FDI_TX_CTL(pipe),
			     FDI_LINK_TRAIN_NONE_IVB,
			     FDI_LINK_TRAIN_PATTERN_2_IVB);
		intel_de_rmw(display, FDI_RX_CTL(pipe),
			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
			     FDI_LINK_TRAIN_PATTERN_2_CPT);
		intel_de_posting_read(display, FDI_RX_CTL(pipe));
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(display, reg);
			drm_dbg_kms(display->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(display, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(display, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(display->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(display->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(display->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(display, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) |
		       FDI_RX_PWRDN_LANE0_VAL(2) |
		       FDI_RX_TP1_TO_TP2_48 |
		       FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = display->fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(display->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(display, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(display, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE |
			       ((crtc_state->fdi_lanes - 1) << 1) |
			       DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(display, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(display, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(display->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(display->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(display, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(display, FDI_RX_CTL(PIPE_A));

		intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
		intel_de_posting_read(display, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		intel_de_rmw(display, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
		intel_de_posting_read(display, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(display, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
		intel_de_posting_read(display, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(display, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
	intel_de_rmw(display, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
	intel_wait_ddi_buf_idle(display, PORT_E);
	intel_ddi_disable_clock(encoder);
	intel_de_rmw(display, FDI_RX_MISC(PIPE_A),
		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
	intel_de_rmw(display, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	intel_de_rmw(display, reg, 0, FDI_PCDCLK);
	intel_de_posting_read(display, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(display, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(display, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;

	/* Switch from PCDclk to Rawclk */
	intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);

	/* Disable CPU FDI TX PLL */
	intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
	intel_de_posting_read(display, FDI_TX_CTL(pipe));
	udelay(100);

	/* Wait for the clocks to turn off. */
	intel_de_rmw(display, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
	intel_de_posting_read(display, FDI_RX_CTL(pipe));
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	intel_de_rmw(display, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
	intel_de_posting_read(display, FDI_TX_CTL(pipe));

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(display, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(display))
		intel_de_write(display, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	intel_de_rmw(display, FDI_TX_CTL(pipe),
		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(display, reg);
	if (HAS_PCH_CPT(display)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in TRANSCONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(display, TRANSCONF(display, pipe)) & TRANSCONF_BPC_MASK) << 11;
	intel_de_write(display, reg, temp);

	intel_de_posting_read(display, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct intel_display *display)
{
	if (display->platform.ironlake) {
		display->funcs.fdi = &ilk_funcs;
	} else if (display->platform.sandybridge) {
		display->funcs.fdi = &gen6_funcs;
	} else if (display->platform.ivybridge) {
		/* FIXME: detect B0+ stepping and use auto training */
		display->funcs.fdi = &ivb_funcs;
	}
}