// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "skl_watermark.h"

static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
        struct intel_pmdemand_state *pmdemand_state;

        pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
        if (!pmdemand_state)
                return NULL;

        return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
                                         struct intel_global_state *state)
{
        kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
        .atomic_duplicate_state = intel_pmdemand_duplicate_state,
        .atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_global_obj_state(state,
                                                  &i915->display.pmdemand.obj);

        if (IS_ERR(pmdemand_state))
                return ERR_CAST(pmdemand_state);

        return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_old_global_obj_state(state,
                                                      &i915->display.pmdemand.obj);

        if (!pmdemand_state)
                return NULL;

        return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_global_state *pmdemand_state =
                intel_atomic_get_new_global_obj_state(state,
                                                      &i915->display.pmdemand.obj);

        if (!pmdemand_state)
                return NULL;

        return to_intel_pmdemand_state(pmdemand_state);
}

int intel_pmdemand_init(struct drm_i915_private *i915)
{
        struct intel_pmdemand_state *pmdemand_state;

        pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
        if (!pmdemand_state)
                return -ENOMEM;

        intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
                                     &pmdemand_state->base,
                                     &intel_pmdemand_funcs);

        if (IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
                /* Wa_14016740474 */
                intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

        return 0;
}

void intel_pmdemand_init_early(struct drm_i915_private *i915)
{
        mutex_init(&i915->display.pmdemand.lock);
        init_waitqueue_head(&i915->display.pmdemand.waitqueue);
}

void
intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
                                struct intel_encoder *encoder,
                                struct intel_pmdemand_state *pmdemand_state,
                                bool set_bit)
{
        enum phy phy;

        if (DISPLAY_VER(i915) < 14)
                return;

        if (!encoder)
                return;

        if (intel_encoder_is_tc(encoder))
                return;

        phy = intel_encoder_to_phy(encoder);

        if (set_bit)
                pmdemand_state->active_combo_phys_mask |= BIT(phy);
        else
                pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}
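/*
 * Record the port clock (in kHz) for a pipe; the maximum across all
 * pipes is later converted to MHz and folded into the pmdemand
 * parameters as ddiclk_max (see intel_pmdemand_update_max_ddiclk()).
 */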
void
intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
                                 struct intel_pmdemand_state *pmdemand_state,
                                 enum pipe pipe, int port_clock)
{
        if (DISPLAY_VER(i915) < 14)
                return;

        pmdemand_state->ddi_clocks[pipe] = port_clock;
}

static void
intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
                                 struct intel_atomic_state *state,
                                 struct intel_pmdemand_state *pmdemand_state)
{
        int max_ddiclk = 0;
        const struct intel_crtc_state *new_crtc_state;
        struct intel_crtc *crtc;
        int i;

        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                intel_pmdemand_update_port_clock(i915, pmdemand_state,
                                                 crtc->pipe,
                                                 new_crtc_state->port_clock);

        for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
                max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

        pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
                                     struct intel_atomic_state *state,
                                     struct drm_connector_state *conn_state,
                                     bool set_bit,
                                     struct intel_pmdemand_state *pmdemand_state)
{
        struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
        struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
        struct intel_crtc_state *crtc_state;

        if (!crtc)
                return;

        if (set_bit)
                crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
        else
                crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

        if (!crtc_state->hw.active)
                return;

        intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
                                        set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
                                         struct intel_atomic_state *state,
                                         struct intel_pmdemand_state *pmdemand_state)
{
        struct drm_connector_state *old_conn_state;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *connector;
        int i;

        for_each_oldnew_connector_in_state(&state->base, connector,
                                           old_conn_state, new_conn_state, i) {
                if (!intel_connector_needs_modeset(state, connector))
                        continue;

                /* First clear the active phys in the old connector state */
                intel_pmdemand_update_connector_phys(i915, state,
                                                     old_conn_state, false,
                                                     pmdemand_state);

                /* Then set the active phys in the new connector state */
                intel_pmdemand_update_connector_phys(i915, state,
                                                     new_conn_state, true,
                                                     pmdemand_state);
        }

        pmdemand_state->params.active_phys =
                min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
                      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
                                  struct intel_encoder *encoder)
{
        return encoder && intel_encoder_is_tc(encoder);
}
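/*
 * Only an encoder change where at least one side uses a non-TC (combo)
 * PHY can alter the active PHY count: TC PHYs are never counted in
 * active_combo_phys_mask (see intel_pmdemand_update_phys_mask()), so a
 * TC-to-TC encoder switch does not require a pmdemand update.
 */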
static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct drm_connector_state *old_conn_state;
        struct drm_connector_state *new_conn_state;
        struct drm_connector *connector;
        int i;

        for_each_oldnew_connector_in_state(&state->base, connector,
                                           old_conn_state, new_conn_state, i) {
                struct intel_encoder *old_encoder =
                        to_intel_encoder(old_conn_state->best_encoder);
                struct intel_encoder *new_encoder =
                        to_intel_encoder(new_conn_state->best_encoder);

                if (!intel_connector_needs_modeset(state, connector))
                        continue;

                if (old_encoder == new_encoder ||
                    (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
                     intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
                        continue;

                return true;
        }

        return false;
}

static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
        const struct intel_bw_state *new_bw_state, *old_bw_state;
        const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
        const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
        const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
        struct intel_crtc *crtc;
        int i;

        new_bw_state = intel_atomic_get_new_bw_state(state);
        old_bw_state = intel_atomic_get_old_bw_state(state);
        if (new_bw_state && new_bw_state->qgv_point_peakbw !=
            old_bw_state->qgv_point_peakbw)
                return true;

        new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
        old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
        if (new_dbuf_state &&
            (new_dbuf_state->active_pipes != old_dbuf_state->active_pipes ||
             new_dbuf_state->enabled_slices != old_dbuf_state->enabled_slices))
                return true;

        new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
        old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
        if (new_cdclk_state &&
            (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk ||
             new_cdclk_state->actual.voltage_level !=
             old_cdclk_state->actual.voltage_level))
                return true;

        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i)
                if (new_crtc_state->port_clock != old_crtc_state->port_clock)
                        return true;

        return intel_pmdemand_connector_needs_update(state);
}
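/*
 * Compute the new pmdemand parameters from the bw, dbuf and cdclk
 * global states. The hardware request fields are narrow, so the counts
 * are clamped: active pipes and dbuf slices to 3, PHYs and PLLs to 7.
 */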
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_bw_state *new_bw_state;
        const struct intel_cdclk_state *new_cdclk_state;
        const struct intel_dbuf_state *new_dbuf_state;
        struct intel_pmdemand_state *new_pmdemand_state;

        if (DISPLAY_VER(i915) < 14)
                return 0;

        if (!intel_pmdemand_needs_update(state))
                return 0;

        new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
        if (IS_ERR(new_pmdemand_state))
                return PTR_ERR(new_pmdemand_state);

        new_bw_state = intel_atomic_get_bw_state(state);
        if (IS_ERR(new_bw_state))
                return PTR_ERR(new_bw_state);

        /* Firmware will calculate the qclk_gv_index; the requirement is set to 0 */
        new_pmdemand_state->params.qclk_gv_index = 0;
        new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

        new_dbuf_state = intel_atomic_get_dbuf_state(state);
        if (IS_ERR(new_dbuf_state))
                return PTR_ERR(new_dbuf_state);

        new_pmdemand_state->params.active_pipes =
                min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
        new_pmdemand_state->params.active_dbufs =
                min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);

        new_cdclk_state = intel_atomic_get_cdclk_state(state);
        if (IS_ERR(new_cdclk_state))
                return PTR_ERR(new_cdclk_state);

        new_pmdemand_state->params.voltage_index =
                new_cdclk_state->actual.voltage_level;
        new_pmdemand_state->params.cdclk_freq_mhz =
                DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

        intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);

        intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);

        /*
         * Active_PLLs starts at 1 to account for the CDCLK PLL.
         * TODO: Account for the genlock filter once it gets used.
         */
        new_pmdemand_state->params.plls =
                min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

        /*
         * Set scalers to the maximum, as the value cannot be calculated
         * during flips and fastsets without taking the global state locks.
         */
        new_pmdemand_state->params.scalers = 7;

        if (state->base.allow_modeset)
                return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
        else
                return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
{
        return !(intel_de_wait_for_clear(i915,
                                         XELPDP_INITIATE_PMDEMAND_REQUEST(1),
                                         XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
                 intel_de_wait_for_clear(i915,
                                         GEN12_DCPR_STATUS_1,
                                         XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

void
intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
                                    struct intel_pmdemand_state *pmdemand_state)
{
        u32 reg1, reg2;

        if (DISPLAY_VER(i915) < 14)
                return;

        mutex_lock(&i915->display.pmdemand.lock);
        if (drm_WARN_ON(&i915->drm,
                        !intel_pmdemand_check_prev_transaction(i915))) {
                memset(&pmdemand_state->params, 0,
                       sizeof(pmdemand_state->params));
                goto unlock;
        }

        reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

        reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

        /* Set 1 */
        pmdemand_state->params.qclk_gv_bw =
                REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
        pmdemand_state->params.voltage_index =
                REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
        pmdemand_state->params.qclk_gv_index =
                REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
        pmdemand_state->params.active_pipes =
                REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
        pmdemand_state->params.active_dbufs =
                REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
        pmdemand_state->params.active_phys =
                REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

        /* Set 2 */
        pmdemand_state->params.cdclk_freq_mhz =
                REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
        pmdemand_state->params.ddiclk_max =
                REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
        pmdemand_state->params.scalers =
                REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);

unlock:
        mutex_unlock(&i915->display.pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
{
        return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
                 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_wait(struct drm_i915_private *i915)
{
        if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
                                intel_pmdemand_req_complete(i915),
                                msecs_to_jiffies_timeout(10)))
                drm_err(&i915->drm,
                        "timed out waiting for Punit PM Demand Response\n");
}
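/*
 * A request is initiated by setting XELPDP_PMDEMAND_REQ_ENABLE in
 * XELPDP_INITIATE_PMDEMAND_REQUEST(1); the bit reads back as 0 once
 * the Punit has serviced the request, which intel_pmdemand_wait()
 * checks for via the pmdemand waitqueue with a 10 ms timeout.
 */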
/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
                                 u8 dbuf_slices)
{
        u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

        mutex_lock(&i915->display.pmdemand.lock);
        if (drm_WARN_ON(&i915->drm,
                        !intel_pmdemand_check_prev_transaction(i915)))
                goto unlock;

        intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
                     XELPDP_PMDEMAND_DBUFS_MASK,
                     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
        intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
                     XELPDP_PMDEMAND_REQ_ENABLE);

        intel_pmdemand_wait(i915);

unlock:
        mutex_unlock(&i915->display.pmdemand.lock);
}
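/*
 * Example of the max3() merging below, assuming a non-serialized
 * (serialized == false) pre-plane update: if the old state needs 3
 * active pipes, the new state needs 1, and a parallel commit has
 * already programmed 2 into the register, the value written back is
 * max3(3, 1, 2) = 3, so the demand never drops below what any
 * in-flight configuration requires.
 */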
static void
intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
                             const struct intel_pmdemand_state *old,
                             u32 *reg1, u32 *reg2, bool serialized)
{
        /*
         * The pmdemand parameter updates happen in two steps: a pre-plane
         * and a post-plane update. During the pre-plane update, as the DE
         * might still be handling some old operations, program the
         * pmdemand parameters with the higher of the old and new values
         * to avoid unexpected performance issues. Then, once things have
         * settled, use the new parameter values as part of the post-plane
         * update.
         *
         * If the pmdemand params update happens without modeset allowed,
         * the updates cannot be serialized, which implies that parallel
         * atomic commits may affect the pmdemand parameters. In that case
         * the current register values must be considered as well: in the
         * pre-plane case take the max of the old, new and current register
         * values, and in the post-plane case the max of the new and
         * current register values.
         */

#define update_reg(reg, field, mask) do { \
        u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
        u32 old_val = old ? old->params.field : 0; \
        u32 new_val = new->params.field; \
\
        *(reg) &= ~(mask); \
        *(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

        /* Set 1 */
        update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
        update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
        update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
        update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
        update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
        update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

        /* Set 2 */
        update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
        update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
        update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
        update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

#undef update_reg
}

static void
intel_pmdemand_program_params(struct drm_i915_private *i915,
                              const struct intel_pmdemand_state *new,
                              const struct intel_pmdemand_state *old,
                              bool serialized)
{
        bool changed = false;
        u32 reg1, mod_reg1;
        u32 reg2, mod_reg2;

        mutex_lock(&i915->display.pmdemand.lock);
        if (drm_WARN_ON(&i915->drm,
                        !intel_pmdemand_check_prev_transaction(i915)))
                goto unlock;

        reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
        mod_reg1 = reg1;

        reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
        mod_reg2 = reg2;

        intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
                                     serialized);

        if (reg1 != mod_reg1) {
                intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
                               mod_reg1);
                changed = true;
        }

        if (reg2 != mod_reg2) {
                intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
                               mod_reg2);
                changed = true;
        }

        /* Initiate the PM demand request only if the register values changed */
        if (!changed)
                goto unlock;

        drm_dbg_kms(&i915->drm,
                    "initiate pmdemand request values: (0x%x 0x%x)\n",
                    mod_reg1, mod_reg2);

        intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
                     XELPDP_PMDEMAND_REQ_ENABLE);

        intel_pmdemand_wait(i915);

unlock:
        mutex_unlock(&i915->display.pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
                             const struct intel_pmdemand_state *old)
{
        return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}
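/*
 * Pre-plane: pass the old state so intel_pmdemand_update_params()
 * programs max(old, new) while the update is still in flight.
 * Post-plane: pass old == NULL so only the new values (and, if not
 * serialized, the current register values) are considered.
 */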
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_pmdemand_state *new_pmdemand_state =
                intel_atomic_get_new_pmdemand_state(state);
        const struct intel_pmdemand_state *old_pmdemand_state =
                intel_atomic_get_old_pmdemand_state(state);

        if (DISPLAY_VER(i915) < 14)
                return;

        if (!new_pmdemand_state ||
            !intel_pmdemand_state_changed(new_pmdemand_state,
                                          old_pmdemand_state))
                return;

        WARN_ON(!new_pmdemand_state->base.changed);

        intel_pmdemand_program_params(i915, new_pmdemand_state,
                                      old_pmdemand_state,
                                      intel_atomic_global_state_is_serialized(state));
}

void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
        struct drm_i915_private *i915 = to_i915(state->base.dev);
        const struct intel_pmdemand_state *new_pmdemand_state =
                intel_atomic_get_new_pmdemand_state(state);
        const struct intel_pmdemand_state *old_pmdemand_state =
                intel_atomic_get_old_pmdemand_state(state);

        if (DISPLAY_VER(i915) < 14)
                return;

        if (!new_pmdemand_state ||
            !intel_pmdemand_state_changed(new_pmdemand_state,
                                          old_pmdemand_state))
                return;

        WARN_ON(!new_pmdemand_state->base.changed);

        intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
                                      intel_atomic_global_state_is_serialized(state));
}