1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright 2025 Advanced Micro Devices, Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 24 #include <linux/delay.h> 25 26 #include "amdgpu.h" 27 #include "amdgpu_reset.h" 28 #include "amdgpu_trace.h" 29 #include "amdgpu_virt.h" 30 #include "amdgpu_reg_access.h" 31 32 #define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2) 33 #define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2) 34 #define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2) 35 36 /* 37 * register access helper functions. 38 */ 39 40 /** 41 * amdgpu_device_rreg - read a memory mapped IO or indirect register 42 * 43 * @adev: amdgpu_device pointer 44 * @reg: dword aligned register offset 45 * @acc_flags: access flags which require special behavior 46 * 47 * Returns the 32 bit value from the offset specified. 
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
			    uint32_t acc_flags)
{
	uint32_t ret;

	/* HW access disabled (e.g. during reset/teardown): read back as 0. */
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * SR-IOV VF at runtime: route through KIQ unless the caller
		 * opted out (AMDGPU_REGS_NO_KIQ) or a reset currently holds
		 * the reset-domain semaphore (trylock fails -> direct MMIO).
		 */
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Register lies beyond the MMIO BAR: use indirect PCIe access. */
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with bytes helper functions
 * @offset:bytes offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	/* Byte reads have no indirect fallback: out-of-range is a driver bug. */
	BUG();
}

/**
 * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, uint32_t reg,
				uint32_t acc_flags, uint32_t xcc_id)
{
	uint32_t ret, rlcg_flag;

	/* HW access disabled (e.g. during reset/teardown): read back as 0. */
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * SR-IOV VF outside runtime with RLCG support: use the RLC
		 * scratch-based indirect path for this register, resolved to
		 * the physical instance of the requested XCC.
		 */
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_runtime(adev) &&
		    adev->gfx.rlc.rlcg_reg_access_supported &&
		    amdgpu_virt_get_rlcg_reg_access_flag(
			    adev, acc_flags, GC_HWIP, false, &rlcg_flag)) {
			ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag,
						      GET_INST(GC, xcc_id));
		} else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
			   amdgpu_sriov_runtime(adev) &&
			   down_read_trylock(&adev->reset_domain->sem)) {
			/* Runtime VF: KIQ path, skipped while a reset holds the sem. */
			ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Register lies beyond the MMIO BAR: use indirect PCIe access. */
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	return ret;
}

/*
 * MMIO register write with bytes helper functions
 * @offset:bytes offset from MMIO start
 * @value: the value want to be written to the register
 */

/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		/* Byte writes have no indirect fallback: out-of-range is a driver bug. */
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	/* HW access disabled (e.g. during reset/teardown): drop the write. */
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		/*
		 * SR-IOV VF at runtime: route through KIQ unless the caller
		 * opted out (AMDGPU_REGS_NO_KIQ) or a reset currently holds
		 * the reset-domain semaphore (trylock fails -> direct MMIO).
		 */
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			amdgpu_kiq_wreg(adev, reg, v, 0);
			up_read(&adev->reset_domain->sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Register lies beyond the MMIO BAR: use indirect PCIe access. */
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}

/**
 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
 *
 * @adev: amdgpu_device pointer
 * @reg: mmio/rlc register
 * @v: value to write
 * @xcc_id: xcc accelerated compute core id
 *
 * this function is invoked only for the debugfs register access
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg,
			     uint32_t v, uint32_t xcc_id)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	/* Registers inside the RLCG range must go through the SR-IOV RLC path. */
	if (amdgpu_sriov_fullaccess(adev) && adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
	} else if ((reg * 4) >= adev->rmmio_size) {
		adev->pcie_wreg(adev, reg * 4, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 * @xcc_id: xcc accelerated compute core id
 *
 * Writes the value specified to the offset specified.
233 */ 234 void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, uint32_t reg, 235 uint32_t v, uint32_t acc_flags, uint32_t xcc_id) 236 { 237 uint32_t rlcg_flag; 238 239 if (amdgpu_device_skip_hw_access(adev)) 240 return; 241 242 if ((reg * 4) < adev->rmmio_size) { 243 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_runtime(adev) && 244 adev->gfx.rlc.rlcg_reg_access_supported && 245 amdgpu_virt_get_rlcg_reg_access_flag( 246 adev, acc_flags, GC_HWIP, true, &rlcg_flag)) { 247 amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, 248 GET_INST(GC, xcc_id)); 249 } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && 250 amdgpu_sriov_runtime(adev) && 251 down_read_trylock(&adev->reset_domain->sem)) { 252 amdgpu_kiq_wreg(adev, reg, v, xcc_id); 253 up_read(&adev->reset_domain->sem); 254 } else { 255 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 256 } 257 } else { 258 adev->pcie_wreg(adev, reg * 4, v); 259 } 260 } 261 262 /** 263 * amdgpu_device_indirect_rreg - read an indirect register 264 * 265 * @adev: amdgpu_device pointer 266 * @reg_addr: indirect register address to read from 267 * 268 * Returns the value of indirect register @reg_addr 269 */ 270 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, u32 reg_addr) 271 { 272 unsigned long flags, pcie_index, pcie_data; 273 void __iomem *pcie_index_offset; 274 void __iomem *pcie_data_offset; 275 u32 r; 276 277 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 278 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 279 280 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 281 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 282 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 283 284 writel(reg_addr, pcie_index_offset); 285 readl(pcie_index_offset); 286 r = readl(pcie_data_offset); 287 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 288 289 return r; 290 } 291 292 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, u64 reg_addr) 293 { 294 unsigned long flags, 
pcie_index, pcie_index_hi, pcie_data; 295 u32 r; 296 void __iomem *pcie_index_offset; 297 void __iomem *pcie_index_hi_offset; 298 void __iomem *pcie_data_offset; 299 300 if (unlikely(!adev->nbio.funcs)) { 301 pcie_index = AMDGPU_PCIE_INDEX_FALLBACK; 302 pcie_data = AMDGPU_PCIE_DATA_FALLBACK; 303 } else { 304 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 305 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 306 } 307 308 if (reg_addr >> 32) { 309 if (unlikely(!adev->nbio.funcs)) 310 pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK; 311 else 312 pcie_index_hi = 313 adev->nbio.funcs->get_pcie_index_hi_offset( 314 adev); 315 } else { 316 pcie_index_hi = 0; 317 } 318 319 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 320 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 321 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 322 if (pcie_index_hi != 0) 323 pcie_index_hi_offset = 324 (void __iomem *)adev->rmmio + pcie_index_hi * 4; 325 326 writel(reg_addr, pcie_index_offset); 327 readl(pcie_index_offset); 328 if (pcie_index_hi != 0) { 329 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 330 readl(pcie_index_hi_offset); 331 } 332 r = readl(pcie_data_offset); 333 334 /* clear the high bits */ 335 if (pcie_index_hi != 0) { 336 writel(0, pcie_index_hi_offset); 337 readl(pcie_index_hi_offset); 338 } 339 340 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 341 342 return r; 343 } 344 345 /** 346 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register 347 * 348 * @adev: amdgpu_device pointer 349 * @reg_addr: indirect register address to read from 350 * 351 * Returns the value of indirect register @reg_addr 352 */ 353 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, u32 reg_addr) 354 { 355 unsigned long flags, pcie_index, pcie_data; 356 void __iomem *pcie_index_offset; 357 void __iomem *pcie_data_offset; 358 u64 r; 359 360 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 361 pcie_data = 
adev->nbio.funcs->get_pcie_data_offset(adev); 362 363 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 364 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 365 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 366 367 /* read low 32 bits */ 368 writel(reg_addr, pcie_index_offset); 369 readl(pcie_index_offset); 370 r = readl(pcie_data_offset); 371 /* read high 32 bits */ 372 writel(reg_addr + 4, pcie_index_offset); 373 readl(pcie_index_offset); 374 r |= ((u64)readl(pcie_data_offset) << 32); 375 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 376 377 return r; 378 } 379 380 u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, u64 reg_addr) 381 { 382 unsigned long flags, pcie_index, pcie_data; 383 unsigned long pcie_index_hi = 0; 384 void __iomem *pcie_index_offset; 385 void __iomem *pcie_index_hi_offset; 386 void __iomem *pcie_data_offset; 387 u64 r; 388 389 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 390 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 391 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) 392 pcie_index_hi = 393 adev->nbio.funcs->get_pcie_index_hi_offset(adev); 394 395 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 396 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 397 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 398 if (pcie_index_hi != 0) 399 pcie_index_hi_offset = 400 (void __iomem *)adev->rmmio + pcie_index_hi * 4; 401 402 /* read low 32 bits */ 403 writel(reg_addr, pcie_index_offset); 404 readl(pcie_index_offset); 405 if (pcie_index_hi != 0) { 406 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 407 readl(pcie_index_hi_offset); 408 } 409 r = readl(pcie_data_offset); 410 /* read high 32 bits */ 411 writel(reg_addr + 4, pcie_index_offset); 412 readl(pcie_index_offset); 413 if (pcie_index_hi != 0) { 414 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 415 readl(pcie_index_hi_offset); 416 } 417 r |= 
((u64)readl(pcie_data_offset) << 32); 418 419 /* clear the high bits */ 420 if (pcie_index_hi != 0) { 421 writel(0, pcie_index_hi_offset); 422 readl(pcie_index_hi_offset); 423 } 424 425 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 426 427 return r; 428 } 429 430 /** 431 * amdgpu_device_indirect_wreg - write an indirect register address 432 * 433 * @adev: amdgpu_device pointer 434 * @reg_addr: indirect register offset 435 * @reg_data: indirect register data 436 * 437 */ 438 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, u32 reg_addr, 439 u32 reg_data) 440 { 441 unsigned long flags, pcie_index, pcie_data; 442 void __iomem *pcie_index_offset; 443 void __iomem *pcie_data_offset; 444 445 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 446 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 447 448 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 449 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 450 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 451 452 writel(reg_addr, pcie_index_offset); 453 readl(pcie_index_offset); 454 writel(reg_data, pcie_data_offset); 455 readl(pcie_data_offset); 456 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 457 } 458 459 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, u64 reg_addr, 460 u32 reg_data) 461 { 462 unsigned long flags, pcie_index, pcie_index_hi, pcie_data; 463 void __iomem *pcie_index_offset; 464 void __iomem *pcie_index_hi_offset; 465 void __iomem *pcie_data_offset; 466 467 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 468 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 469 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) 470 pcie_index_hi = 471 adev->nbio.funcs->get_pcie_index_hi_offset(adev); 472 else 473 pcie_index_hi = 0; 474 475 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 476 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 477 pcie_data_offset = (void __iomem 
*)adev->rmmio + pcie_data * 4; 478 if (pcie_index_hi != 0) 479 pcie_index_hi_offset = 480 (void __iomem *)adev->rmmio + pcie_index_hi * 4; 481 482 writel(reg_addr, pcie_index_offset); 483 readl(pcie_index_offset); 484 if (pcie_index_hi != 0) { 485 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 486 readl(pcie_index_hi_offset); 487 } 488 writel(reg_data, pcie_data_offset); 489 readl(pcie_data_offset); 490 491 /* clear the high bits */ 492 if (pcie_index_hi != 0) { 493 writel(0, pcie_index_hi_offset); 494 readl(pcie_index_hi_offset); 495 } 496 497 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 498 } 499 500 /** 501 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address 502 * 503 * @adev: amdgpu_device pointer 504 * @reg_addr: indirect register offset 505 * @reg_data: indirect register data 506 * 507 */ 508 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, u32 reg_addr, 509 u64 reg_data) 510 { 511 unsigned long flags, pcie_index, pcie_data; 512 void __iomem *pcie_index_offset; 513 void __iomem *pcie_data_offset; 514 515 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 516 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 517 518 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 519 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 520 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 521 522 /* write low 32 bits */ 523 writel(reg_addr, pcie_index_offset); 524 readl(pcie_index_offset); 525 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset); 526 readl(pcie_data_offset); 527 /* write high 32 bits */ 528 writel(reg_addr + 4, pcie_index_offset); 529 readl(pcie_index_offset); 530 writel((u32)(reg_data >> 32), pcie_data_offset); 531 readl(pcie_data_offset); 532 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 533 } 534 535 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, u64 reg_addr, 536 u64 reg_data) 537 { 538 unsigned long flags, pcie_index, pcie_data; 
539 unsigned long pcie_index_hi = 0; 540 void __iomem *pcie_index_offset; 541 void __iomem *pcie_index_hi_offset; 542 void __iomem *pcie_data_offset; 543 544 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev); 545 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev); 546 if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset)) 547 pcie_index_hi = 548 adev->nbio.funcs->get_pcie_index_hi_offset(adev); 549 550 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 551 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4; 552 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4; 553 if (pcie_index_hi != 0) 554 pcie_index_hi_offset = 555 (void __iomem *)adev->rmmio + pcie_index_hi * 4; 556 557 /* write low 32 bits */ 558 writel(reg_addr, pcie_index_offset); 559 readl(pcie_index_offset); 560 if (pcie_index_hi != 0) { 561 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 562 readl(pcie_index_hi_offset); 563 } 564 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset); 565 readl(pcie_data_offset); 566 /* write high 32 bits */ 567 writel(reg_addr + 4, pcie_index_offset); 568 readl(pcie_index_offset); 569 if (pcie_index_hi != 0) { 570 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset); 571 readl(pcie_index_hi_offset); 572 } 573 writel((u32)(reg_data >> 32), pcie_data_offset); 574 readl(pcie_data_offset); 575 576 /* clear the high bits */ 577 if (pcie_index_hi != 0) { 578 writel(0, pcie_index_hi_offset); 579 readl(pcie_index_hi_offset); 580 } 581 582 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 583 } 584 585 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, u32 reg) 586 { 587 unsigned long flags, address, data; 588 u32 r; 589 590 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); 591 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); 592 593 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 594 WREG32(address, reg * 4); 595 (void)RREG32(address); 596 r = RREG32(data); 597 
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 598 return r; 599 } 600 601 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v) 602 { 603 unsigned long flags, address, data; 604 605 address = adev->nbio.funcs->get_pcie_port_index_offset(adev); 606 data = adev->nbio.funcs->get_pcie_port_data_offset(adev); 607 608 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 609 WREG32(address, reg * 4); 610 (void)RREG32(address); 611 WREG32(data, v); 612 (void)RREG32(data); 613 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 614 } 615 616 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, uint32_t inst, 617 uint32_t reg_addr, char reg_name[], 618 uint32_t expected_value, uint32_t mask) 619 { 620 uint32_t ret = 0; 621 uint32_t old_ = 0; 622 uint32_t tmp_ = RREG32(reg_addr); 623 uint32_t loop = adev->usec_timeout; 624 625 while ((tmp_ & (mask)) != (expected_value)) { 626 if (old_ != tmp_) { 627 loop = adev->usec_timeout; 628 old_ = tmp_; 629 } else 630 udelay(1); 631 tmp_ = RREG32(reg_addr); 632 loop--; 633 if (!loop) { 634 dev_warn( 635 adev->dev, 636 "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", 637 inst, reg_name, (uint32_t)expected_value, 638 (uint32_t)(tmp_ & (mask))); 639 ret = -ETIMEDOUT; 640 break; 641 } 642 } 643 return ret; 644 } 645