/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dc_types.h"
#include "dmub_reg.h"
#include "dmub_dcn35.h"
#include "dc/dc_types.h"

#include "dcn/dcn_3_5_0_offset.h"
#include "dcn/dcn_3_5_0_sh_mask.h"

#define BASE_INNER(seg) ctx->dcn_reg_offsets[seg]
#define CTX dmub
#define REGS dmub->regs_dcn35
#define REG_OFFSET_EXP(reg_name) BASE(reg##reg_name##_BASE_IDX) + reg##reg_name

void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx)
{
	struct dmub_srv_dcn35_regs *regs = dmub->regs_dcn35;
#define REG_STRUCT regs

#define DMUB_SR(reg) REG_STRUCT->offset.reg = REG_OFFSET_EXP(reg);
	DMUB_DCN35_REGS()
	DMCUB_INTERNAL_REGS()
#undef DMUB_SR

#define DMUB_SF(reg, field) REG_STRUCT->mask.reg##__##field = FD_MASK(reg, field);
	DMUB_DCN35_FIELDS()
#undef DMUB_SF

#define DMUB_SF(reg, field) REG_STRUCT->shift.reg##__##field = FD_SHIFT(reg, field);
	DMUB_DCN35_FIELDS()
#undef DMUB_SF
#undef REG_STRUCT
}

static void dmub_dcn35_get_fb_base_offset(struct dmub_srv *dmub,
					  uint64_t *fb_base,
					  uint64_t *fb_offset)
{
	uint32_t tmp;

	/*
	if (dmub->fb_base || dmub->fb_offset) {
		*fb_base = dmub->fb_base;
		*fb_offset = dmub->fb_offset;
		return;
	}
	*/

	REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
	*fb_base = (uint64_t)tmp << 24;

	REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp);
	*fb_offset = (uint64_t)tmp << 24;
}

static inline void dmub_dcn35_translate_addr(const union dmub_addr *addr_in,
					     uint64_t fb_base,
					     uint64_t fb_offset,
					     union dmub_addr *addr_out)
{
	addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset;
}

void dmub_dcn35_reset(struct dmub_srv *dmub)
{
	union dmub_gpint_data_register cmd;
	const uint32_t timeout = 100;
	uint32_t in_reset, is_enabled, scratch, i, pwait_mode;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);

	if (in_reset == 0) {
		cmd.bits.status = 1;
		cmd.bits.command_code = DMUB_GPINT__STOP_FW;
		cmd.bits.param = 0;

		dmub->hw_funcs.set_gpint(dmub, cmd);

		/**
		 * Timeout covers both the ACK and the wait
		 * for remaining work to finish.
		 */

		for (i = 0; i < timeout; ++i) {
			if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
				break;

			udelay(1);
		}

		for (i = 0; i < timeout; ++i) {
			scratch = dmub->hw_funcs.get_gpint_response(dmub);
			if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
				break;

			udelay(1);
		}

		for (i = 0; i < timeout; ++i) {
			REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
			if (pwait_mode & (1 << 0))
				break;

			udelay(1);
		}
		/* Force reset in case we timed out, DMCUB is likely hung. */
	}

	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled);

	if (is_enabled) {
		REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
		REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
		REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
	}

	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
	REG_WRITE(DMCUB_SCRATCH0, 0);

	/* Clear the GPINT command manually so we don't send anything during boot. */
	cmd.all = 0;
	dmub->hw_funcs.set_gpint(dmub, cmd);
}

void dmub_dcn35_reset_release(struct dmub_srv *dmub)
{
	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);

	REG_UPDATE_3(DMU_CLK_CNTL,
		     LONO_DISPCLK_GATE_DISABLE, 1,
		     LONO_SOCCLK_GATE_DISABLE, 1,
		     LONO_DMCUBCLK_GATE_DISABLE, 1);

	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
	udelay(1);
	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
	udelay(1);
	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
	REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0);
}

void dmub_dcn35_backdoor_load(struct dmub_srv *dmub,
			      const struct dmub_window *cw0,
			      const struct dmub_window *cw1)
{
	union dmub_addr offset;
	uint64_t fb_base, fb_offset;

	dmub_dcn35_get_fb_base_offset(dmub, &fb_base, &fb_offset);

	dmub_dcn35_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
		  DMCUB_REGION3_CW0_ENABLE, 1);

	dmub_dcn35_translate_addr(&cw1->offset, fb_base, fb_offset, &offset);

	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
		  DMCUB_REGION3_CW1_ENABLE, 1);

	/* TODO: Do we need to set DMCUB_MEM_UNIT_ID? */
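	/* Note: the ZFB path in dmub_dcn35_backdoor_load_zfb_mode() below does program DMCUB_MEM_UNIT_ID (to 0x20). */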
	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0);
}

void dmub_dcn35_backdoor_load_zfb_mode(struct dmub_srv *dmub,
				       const struct dmub_window *cw0,
				       const struct dmub_window *cw1)
{
	union dmub_addr offset;

	REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
	offset = cw0->offset;
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base);
	REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top,
		  DMCUB_REGION3_CW0_ENABLE, 1);
	offset = cw1->offset;
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base);
	REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top,
		  DMCUB_REGION3_CW1_ENABLE, 1);
	REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID,
		     0x20);
}

void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
			      const struct dmub_window *cw2,
			      const struct dmub_window *cw3,
			      const struct dmub_window *cw4,
			      const struct dmub_window *cw5,
			      const struct dmub_window *cw6,
			      const struct dmub_window *region6)
{
	union dmub_addr offset;

	offset = cw3->offset;

	REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base);
	REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top,
		  DMCUB_REGION3_CW3_ENABLE, 1);

	offset = cw4->offset;

	REG_WRITE(DMCUB_REGION3_CW4_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW4_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW4_BASE_ADDRESS, cw4->region.base);
	REG_SET_2(DMCUB_REGION3_CW4_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW4_TOP_ADDRESS, cw4->region.top,
		  DMCUB_REGION3_CW4_ENABLE, 1);

	offset = cw5->offset;

	REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base);
	REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
		  DMCUB_REGION3_CW5_ENABLE, 1);

	REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
	REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
		  DMCUB_REGION5_TOP_ADDRESS,
		  cw5->region.top - cw5->region.base - 1,
		  DMCUB_REGION5_ENABLE, 1);

	offset = cw6->offset;

	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part);
	REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base);
	REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
		  DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
		  DMCUB_REGION3_CW6_ENABLE, 1);

	offset = region6->offset;

	REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
	REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
	REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
		  DMCUB_REGION6_TOP_ADDRESS,
		  region6->region.top - region6->region.base - 1,
		  DMCUB_REGION6_ENABLE, 1);
}

void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
			      const struct dmub_region *inbox1)
{
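	/* Inbox1 ring buffer: program base address and size (top - base, in bytes). */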
	REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, inbox1->base);
	REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base);
}

uint32_t dmub_dcn35_get_inbox1_wptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_INBOX1_WPTR);
}

uint32_t dmub_dcn35_get_inbox1_rptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_INBOX1_RPTR);
}

void dmub_dcn35_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
{
	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
}

void dmub_dcn35_setup_out_mailbox(struct dmub_srv *dmub,
				  const struct dmub_region *outbox1)
{
	REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base);
	REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base);
}

uint32_t dmub_dcn35_get_outbox1_wptr(struct dmub_srv *dmub)
{
	/**
	 * The outbox1 wptr register is accessed without locks (dal & dc);
	 * this function should only be called by dmub_srv_stat_get_notification().
	 */
	return REG_READ(DMCUB_OUTBOX1_WPTR);
}

void dmub_dcn35_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	/**
	 * The outbox1 rptr register is accessed without locks (dal & dc);
	 * this function should only be called by dmub_srv_stat_get_notification().
	 */
	REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset);
}

bool dmub_dcn35_is_hw_init(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;
	uint32_t is_enable;

	status.all = REG_READ(DMCUB_SCRATCH0);
	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);

	return is_enable != 0 && status.bits.dal_fw;
}

bool dmub_dcn35_is_supported(struct dmub_srv *dmub)
{
	uint32_t supported = 0;

	REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported);

	return supported;
}

void dmub_dcn35_set_gpint(struct dmub_srv *dmub,
			  union dmub_gpint_data_register reg)
{
	REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
}

bool dmub_dcn35_is_gpint_acked(struct dmub_srv *dmub,
			       union dmub_gpint_data_register reg)
{
	union dmub_gpint_data_register test;

	reg.bits.status = 0;
	test.all = REG_READ(DMCUB_GPINT_DATAIN1);

	return test.all == reg.all;
}

uint32_t dmub_dcn35_get_gpint_response(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH7);
}

uint32_t dmub_dcn35_get_gpint_dataout(struct dmub_srv *dmub)
{
	uint32_t dataout = REG_READ(DMCUB_GPINT_DATAOUT);

	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 0);

	REG_WRITE(DMCUB_GPINT_DATAOUT, 0);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 1);
	REG_UPDATE(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK, 0);

	REG_UPDATE(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN, 1);

	return dataout;
}

union dmub_fw_boot_status dmub_dcn35_get_fw_boot_status(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;

	status.all = REG_READ(DMCUB_SCRATCH0);
	return status;
}

union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
{
	union dmub_fw_boot_options option;

	option.all = REG_READ(DMCUB_SCRATCH14);
	return option;
}

void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
	union dmub_fw_boot_options boot_options = {0};
	union dmub_fw_boot_options cur_boot_options = {0};

	cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);

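	/* Translate driver hw_params into firmware boot-option bits. */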
	boot_options.bits.z10_disable = params->disable_z10;
	boot_options.bits.dpia_supported = params->dpia_supported;
	boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
	boot_options.bits.usb4_cm_version = params->usb4_cm_version;
	boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
	boot_options.bits.power_optimization = params->power_optimization;
	boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds;
	boot_options.bits.disable_clk_gate = params->disable_clock_gate;
	boot_options.bits.ips_disable = params->disable_ips;
	boot_options.bits.ips_sequential_ono = params->ips_sequential_ono;
	boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
	boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;

	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn35_skip_dmub_panel_power_sequence(struct dmub_srv *dmub, bool skip)
{
	union dmub_fw_boot_options boot_options;

	boot_options.all = REG_READ(DMCUB_SCRATCH14);
	boot_options.bits.skip_phy_init_panel_sequence = skip;
	REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}

void dmub_dcn35_setup_outbox0(struct dmub_srv *dmub,
			      const struct dmub_region *outbox0)
{
	REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base);

	REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base);
}

uint32_t dmub_dcn35_get_outbox0_wptr(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_OUTBOX0_WPTR);
}

void dmub_dcn35_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
{
	REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset);
}

uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_TIMER_CURRENT);
}

void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
	uint32_t is_traceport_enabled, is_cw6_enabled;

	if (!dmub || !diag_data)
		return;

	memset(diag_data, 0, sizeof(*diag_data));

	diag_data->dmcub_version = dmub->fw_version;

	diag_data->scratch[0] = REG_READ(DMCUB_SCRATCH0);
	diag_data->scratch[1] = REG_READ(DMCUB_SCRATCH1);
	diag_data->scratch[2] = REG_READ(DMCUB_SCRATCH2);
	diag_data->scratch[3] = REG_READ(DMCUB_SCRATCH3);
	diag_data->scratch[4] = REG_READ(DMCUB_SCRATCH4);
	diag_data->scratch[5] = REG_READ(DMCUB_SCRATCH5);
	diag_data->scratch[6] = REG_READ(DMCUB_SCRATCH6);
	diag_data->scratch[7] = REG_READ(DMCUB_SCRATCH7);
	diag_data->scratch[8] = REG_READ(DMCUB_SCRATCH8);
	diag_data->scratch[9] = REG_READ(DMCUB_SCRATCH9);
	diag_data->scratch[10] = REG_READ(DMCUB_SCRATCH10);
	diag_data->scratch[11] = REG_READ(DMCUB_SCRATCH11);
	diag_data->scratch[12] = REG_READ(DMCUB_SCRATCH12);
	diag_data->scratch[13] = REG_READ(DMCUB_SCRATCH13);
	diag_data->scratch[14] = REG_READ(DMCUB_SCRATCH14);
	diag_data->scratch[15] = REG_READ(DMCUB_SCRATCH15);
	diag_data->scratch[16] = REG_READ(DMCUB_SCRATCH16);

	diag_data->undefined_address_fault_addr = REG_READ(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR);
	diag_data->inst_fetch_fault_addr = REG_READ(DMCUB_INST_FETCH_FAULT_ADDR);
	diag_data->data_write_fault_addr = REG_READ(DMCUB_DATA_WRITE_FAULT_ADDR);

	diag_data->inbox1_rptr = REG_READ(DMCUB_INBOX1_RPTR);
	diag_data->inbox1_wptr = REG_READ(DMCUB_INBOX1_WPTR);
	diag_data->inbox1_size = REG_READ(DMCUB_INBOX1_SIZE);

	diag_data->inbox0_rptr = REG_READ(DMCUB_INBOX0_RPTR);
	diag_data->inbox0_wptr = REG_READ(DMCUB_INBOX0_WPTR);
	diag_data->inbox0_size = REG_READ(DMCUB_INBOX0_SIZE);

	diag_data->outbox1_rptr = REG_READ(DMCUB_OUTBOX1_RPTR);
	diag_data->outbox1_wptr = REG_READ(DMCUB_OUTBOX1_WPTR);
	diag_data->outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE);

	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
	diag_data->is_dmcub_enabled = is_dmub_enabled;

	REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
	diag_data->is_dmcub_soft_reset = is_soft_reset;

	REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
	diag_data->is_dmcub_secure_reset = is_sec_reset;

	REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
	diag_data->is_traceport_en = is_traceport_enabled;

	REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
	diag_data->is_cw6_enabled = is_cw6_enabled;

	diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
	diag_data->timeout_info = dmub->debug;
}

void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
	/* DMCUB_REGION3_TMR_AXI_SPACE values:
	 * 0b011 (0x3) - FB physical address
	 * 0b100 (0x4) - GPU virtual address
	 *
	 * The default value is 0x3 (FB physical address for the TMR). When
	 * programming the DMUB to run from system memory, change it to 0x4.
	 * The system memory allocation is accessible by both the GPU and the
	 * CPU, so we use the GPU virtual address.
	 */
	REG_WRITE(DMCUB_REGION3_TMR_AXI_SPACE, 0x4);
}

bool dmub_dcn35_should_detect(struct dmub_srv *dmub)
{
	uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0);
	bool should_detect = (fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED) != 0;

	return should_detect;
}

void dmub_dcn35_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data)
{
	REG_WRITE(DMCUB_INBOX0_WPTR, data.inbox0_cmd_common.all);
}

void dmub_dcn35_clear_inbox0_ack_register(struct dmub_srv *dmub)
{
	REG_WRITE(DMCUB_SCRATCH17, 0);
}

uint32_t dmub_dcn35_read_inbox0_ack_register(struct dmub_srv *dmub)
{
	return REG_READ(DMCUB_SCRATCH17);
}

bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub)
{
	union dmub_fw_boot_status status;
	uint32_t is_enable;

	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
	if (is_enable == 0)
		return false;

	status.all = REG_READ(DMCUB_SCRATCH0);

	return (status.bits.dal_fw && status.bits.hw_power_init_done && status.bits.mailbox_rdy) ||
	       (!status.bits.dal_fw && status.bits.mailbox_rdy);
}