/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn32.h"
#include "dc/dc_types.h"
#include "dc_hw_types.h"

#include "dcn/dcn_3_2_0_offset.h"
#include "dcn/dcn_3_2_0_sh_mask.h"

#define DCN_BASE__INST0_SEG2 0x000034C0

#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define CTX dmub
#define REGS dmub->regs_dcn32
#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name

void dmub_srv_dcn32_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
{
	struct dmub_srv_dcn32_regs *regs = dmub->regs_dcn32;

#define REG_STRUCT regs

#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
	DMUB_DCN32_REGS()
	DMCUB_INTERNAL_REGS()
#undef DMUB_SR

#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
	DMUB_DCN32_FIELDS()
#undef DMUB_SF

#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
	DMUB_DCN32_FIELDS()
#undef DMUB_SF

#undef REG_STRUCT
}

static void dmub_dcn32_get_fb_base_offset(struct dmub_srv *dmub,
		uint64_t *fb_base,
		uint64_t *fb_offset)
{
	uint32_t tmp;

	if (dmub->fb_base || dmub->fb_offset) {
		*fb_base = dmub->fb_base;
		*fb_offset = dmub->fb_offset;
		return;
	}

	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
	*fb_base = (uint64_t)tmp << 24;

	REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
	*fb_offset = (uint64_t)tmp << 24;
}

static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
		uint64_t fb_base,
		uint64_t fb_offset,
		union dmub_addr *addr_out)
{
	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}

void dmub_dcn32_reset(struct dmub_srv *dmub)
{
	union dmub_gpint_data_register cmd;
	const uint32_t timeout = 30;
	uint32_t in_reset, scratch, i;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);

	if (in_reset == 0) {
		cmd.bits.status = 1;
		cmd.bits.command_code = DMUB_GPINT__STOP_FW;
		cmd.bits.param = 0;

		dmub->hw_funcs.set_gpint(dmub, cmd);

		/**
		 * Timeout covers both the ACK and the wait
		 * for remaining work to finish.
		 *
		 * This is mostly bound by the PHY disable sequence.
		 * Each register check will be greater than 1us, so
		 * don't bother using udelay.
		 */

		for (i = 0; i < timeout; ++i) {
			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
				break;
		}

		for (i = 0; i < timeout; ++i) {
			scratch = dmub->hw_funcs.get_gpint_response(dmub);
			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
				break;
		}

		/* Force reset in case we timed out, DMCUB is likely hung. */
	}

	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
	REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
	REG_WRITE(DMCUB_SCRATCH0, 0);

	/* Clear the GPINT command manually so we don't reset again. */
	cmd.all = 0;
	dmub->hw_funcs.set_gpint(dmub, cmd);
}

void dmub_dcn32_reset_release(struct dmub_srv *dmub)
{
	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}

void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
		const struct dmub_window *cw0,
		const struct dmub_window *cw1)
{
	union dmub_addr offset;
	uint64_t fb_base, fb_offset;

	dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);

	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);

	dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
			DMCUB_REGION3_CW0_ENABLE, 1);

	dmub_dcn32_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
			DMCUB_REGION3_CW1_ENABLE, 1);

	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
			0x20);
}

/*
 * Same window programming as dmub_dcn32_backdoor_load(), except the
 * caller-provided offsets are used as-is instead of being translated
 * through the frame-buffer base/offset.
 */
void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
		const struct dmub_window *cw0,
		const struct dmub_window *cw1)
{
	union dmub_addr offset;

	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);

	offset = cw0->offset;

	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
			DMCUB_REGION3_CW0_ENABLE, 1);

	offset = cw1->offset;

	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
			DMCUB_REGION3_CW1_ENABLE, 1);

	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0,
			DMCUB_MEM_UNIT_ID, 0x20);
}

void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
		const struct dmub_window *cw2,
		const struct dmub_window *cw3,
		const struct dmub_window *cw4,
		const struct dmub_window *cw5,
		const struct dmub_window *cw6)
{
	union dmub_addr offset;

	offset = cw3->offset;

	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
			DMCUB_REGION3_CW3_ENABLE, 1);

	offset = cw4->offset;

	REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
	REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top,
			DMCUB_REGION3_CW4_ENABLE, 1);

	offset = cw5->offset;

	REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
	REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
			DMCUB_REGION3_CW5_ENABLE, 1);

	REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
	REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
			DMCUB_REGION5_TOP_ADDRESS,
			cw5->region.top - cw5->region.base - 1,
			DMCUB_REGION5_ENABLE, 1);

	offset = cw6->offset;

	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
	REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
			DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
			DMCUB_REGION3_CW6_ENABLE, 1);
}

void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
		const struct dmub_region *inbox1)
{
	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}

uint32_t dmub_dcn32_get_inbox1_wptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_INBOX1_WPTR);
}

uint32_t dmub_dcn32_get_inbox1_rptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_INBOX1_RPTR);
}

void dmub_dcn32_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
{
	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
}

void dmub_dcn32_setup_out_mailbox(struct dmub_srv *dmub,
		const struct dmub_region *outbox1)
{
	REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base);
	REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base);
}

uint32_t dmub_dcn32_get_outbox1_wptr(struct dmub_srv *dmub)
{
	/**
	 * The outbox1 wptr register is accessed without locks (dal & dc)
	 * and is to be read only by dmub_srv_stat_get_notification().
	 */
	return REG_READ(DMCUB_OUTBOX1_WPTR);
}

void dmub_dcn32_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	/**
	 * The outbox1 rptr register is accessed without locks (dal & dc)
	 * and is to be written only by dmub_srv_stat_get_notification().
	 */
	REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset);
}

bool dmub_dcn32_is_hw_init(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;
	uint32_t is_hw_init;

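	/* SCRATCH0 carries the firmware boot status word; the DMCUB is only
	 * considered initialized once it is enabled and the DAL firmware
	 * flag is set.
	 */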
	status.all = REG_READ(DMCUB_SCRATCH0);
	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);

	return is_hw_init != 0 && status.bits.dal_fw;
}

bool dmub_dcn32_is_supported(struct dmub_srv *dmub)
{
	uint32_t supported = 0;

	REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);

	return supported;
}

void dmub_dcn32_set_gpint(struct dmub_srv *dmub,
		union dmub_gpint_data_register reg)
{
	REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
}

bool dmub_dcn32_is_gpint_acked(struct dmub_srv *dmub,
		union dmub_gpint_data_register reg)
{
	union dmub_gpint_data_register test;

	reg.bits.status = 0;
	test.all = REG_READ(DMCUB_GPINT_DATAIN1);

	return test.all == reg.all;
}

uint32_t dmub_dcn32_get_gpint_response(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH7);
}

uint32_t dmub_dcn32_get_gpint_dataout(struct dmub_srv *dmub)
{
	uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT);

	/* Consume the value: mask the GPINT IH interrupt, clear the data
	 * register and pulse the ack, then re-enable the interrupt.
	 */
	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0);

	REG_WRITE(DMCUB_GPINT_DATAOUT, 0);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0);

	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1);

	return dataout;
}

union dmub_fw_boot_status dmub_dcn32_get_fw_boot_status(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;

	status.all = REG_READ(DMCUB_SCRATCH0);
	return status;
}

void dmub_dcn32_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
	union dmub_fw_boot_options boot_options = {0};

	boot_options.bits.z10_disable = params->disable_z10;

	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn32_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
{
	union dmub_fw_boot_options boot_options;

	boot_options.all = REG_READ(DMCUB_SCRATCH14);
	boot_options.bits.skip_phy_init_panel_sequence = skip;
	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn32_setup_outbox0(struct dmub_srv *dmub,
		const struct dmub_region *outbox0)
{
	REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base);

	REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base);
}

uint32_t dmub_dcn32_get_outbox0_wptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_OUTBOX0_WPTR);
}

void dmub_dcn32_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset);
}

uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_TIMER_CURRENT);
}

void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
	uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;

	if (!dmub || !diag_data)
		return;

	memset(diag_data, 0, sizeof(*diag_data));

	diag_data->dmcub_version = dmub->fw_version;

	diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
	diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
	diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
	diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
	diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
	diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
	diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
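	/* SCRATCH7 also serves as the GPINT response register,
	 * see dmub_dcn32_get_gpint_response().
	 */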
	diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
	diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
	diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
	diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
	diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
	diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
	diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
	diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
	diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
	diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);

	diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
	diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
	diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);

	diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
	diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
	diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);

	diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
	diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
	diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);

	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
	diag_data->is_dmcub_enabled = is_dmub_enabled;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
	diag_data->is_dmcub_soft_reset = is_soft_reset;

	REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
	diag_data->is_dmcub_secure_reset = is_sec_reset;

	REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
	diag_data->is_traceport_en = is_traceport_enabled;

	REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
	diag_data->is_cw0_enabled = is_cw0_enabled;

	REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
	diag_data->is_cw6_enabled = is_cw6_enabled;

	diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}

void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
	/* DMCUB_REGION3_TMR_AXI_SPACE values:
	 * 0b011 (0x3) - FB physical address
	 * 0b100 (0x4) - GPU virtual address
	 *
	 * Default value is 0x3 (FB physical address for TMR). When programming
	 * DMUB to be in system memory, change to 0x4. The system memory allocated
	 * is accessible by both GPU and CPU, so we use the GPU virtual address.
	 */
	REG_WRITE(DMCUB_REGION3_TMR_AXI_SPACE, 0x4);
}

void dmub_dcn32_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data)
{
	REG_WRITE(DMCUB_INBOX0_WPTR, data.inbox0_cmd_common.all);
}

void dmub_dcn32_clear_inbox0_ack_register(struct dmub_srv *dmub)
{
	REG_WRITE(DMCUB_SCRATCH17, 0);
}

uint32_t dmub_dcn32_read_inbox0_ack_register(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH17);
}

/*
 * Double-buffer the most recent surface address per SubVP index in scratch
 * registers: one scratch register holds an index that selects which
 * address/meta-address pair was written last, and the index is toggled
 * after every update.
 */
void dmub_dcn32_save_surf_addr(struct dmub_srv *dmub, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	uint32_t index = 0;

	if (subvp_index == 0) {
		index = REG_READ(DMCUB_SCRATCH15);
		if (index) {
			REG_WRITE(DMCUB_SCRATCH9, addr->grph.addr.low_part);
			REG_WRITE(DMCUB_SCRATCH11, addr->grph.meta_addr.low_part);
		} else {
			REG_WRITE(DMCUB_SCRATCH12, addr->grph.addr.low_part);
			REG_WRITE(DMCUB_SCRATCH13, addr->grph.meta_addr.low_part);
		}
		REG_WRITE(DMCUB_SCRATCH15, !index);
	} else if (subvp_index == 1) {
		index = REG_READ(DMCUB_SCRATCH23);
		if (index) {
			REG_WRITE(DMCUB_SCRATCH18, addr->grph.addr.low_part);
			REG_WRITE(DMCUB_SCRATCH19, addr->grph.meta_addr.low_part);
		} else {
			REG_WRITE(DMCUB_SCRATCH20, addr->grph.addr.low_part);
			REG_WRITE(DMCUB_SCRATCH22, addr->grph.meta_addr.low_part);
		}
		REG_WRITE(DMCUB_SCRATCH23, !index);
	} else {
		return;
	}
}