1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <linux/firmware.h> 25 #include <linux/module.h> 26 27 #include "amdgpu.h" 28 #include "amdgpu_ih.h" 29 #include "amdgpu_gfx.h" 30 #include "cikd.h" 31 #include "cik.h" 32 #include "cik_structs.h" 33 #include "atom.h" 34 #include "amdgpu_ucode.h" 35 #include "clearstate_ci.h" 36 37 #include "dce/dce_8_0_d.h" 38 #include "dce/dce_8_0_sh_mask.h" 39 40 #include "bif/bif_4_1_d.h" 41 #include "bif/bif_4_1_sh_mask.h" 42 43 #include "gca/gfx_7_0_d.h" 44 #include "gca/gfx_7_2_enum.h" 45 #include "gca/gfx_7_2_sh_mask.h" 46 47 #include "gmc/gmc_7_0_d.h" 48 #include "gmc/gmc_7_0_sh_mask.h" 49 50 #include "oss/oss_2_0_d.h" 51 #include "oss/oss_2_0_sh_mask.h" 52 53 #define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */ 54 55 #define GFX7_NUM_GFX_RINGS 1 56 #define GFX7_MEC_HPD_SIZE 2048 57 58 #define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001 59 #define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003 60 61 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); 62 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); 63 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); 64 65 MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin"); 66 MODULE_FIRMWARE("amdgpu/bonaire_me.bin"); 67 MODULE_FIRMWARE("amdgpu/bonaire_ce.bin"); 68 MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin"); 69 MODULE_FIRMWARE("amdgpu/bonaire_mec.bin"); 70 71 MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin"); 72 MODULE_FIRMWARE("amdgpu/hawaii_me.bin"); 73 MODULE_FIRMWARE("amdgpu/hawaii_ce.bin"); 74 MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin"); 75 MODULE_FIRMWARE("amdgpu/hawaii_mec.bin"); 76 77 MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin"); 78 MODULE_FIRMWARE("amdgpu/kaveri_me.bin"); 79 MODULE_FIRMWARE("amdgpu/kaveri_ce.bin"); 80 MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin"); 81 MODULE_FIRMWARE("amdgpu/kaveri_mec.bin"); 82 MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin"); 83 84 MODULE_FIRMWARE("amdgpu/kabini_pfp.bin"); 85 MODULE_FIRMWARE("amdgpu/kabini_me.bin"); 86 MODULE_FIRMWARE("amdgpu/kabini_ce.bin"); 87 MODULE_FIRMWARE("amdgpu/kabini_rlc.bin"); 88 MODULE_FIRMWARE("amdgpu/kabini_mec.bin"); 89 90 MODULE_FIRMWARE("amdgpu/mullins_pfp.bin"); 91 MODULE_FIRMWARE("amdgpu/mullins_me.bin"); 92 MODULE_FIRMWARE("amdgpu/mullins_ce.bin"); 93 MODULE_FIRMWARE("amdgpu/mullins_rlc.bin"); 94 MODULE_FIRMWARE("amdgpu/mullins_mec.bin"); 95 96 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = 
{ 97 {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, 98 {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, 99 {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, 100 {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, 101 {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, 102 {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, 103 {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, 104 {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, 105 {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, 106 {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, 107 {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, 108 {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, 109 {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, 110 {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, 111 {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, 112 {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} 113 }; 114 115 static const u32 spectre_rlc_save_restore_register_list[] = { 116 (0x0e00 << 16) | (0xc12c >> 2), 117 0x00000000, 118 (0x0e00 << 16) | (0xc140 >> 2), 119 0x00000000, 120 (0x0e00 << 16) | (0xc150 >> 2), 121 0x00000000, 122 (0x0e00 << 16) | (0xc15c >> 2), 123 0x00000000, 124 (0x0e00 << 16) | (0xc168 >> 2), 125 0x00000000, 126 (0x0e00 << 16) | (0xc170 >> 2), 127 0x00000000, 128 (0x0e00 << 16) | (0xc178 >> 2), 129 0x00000000, 130 (0x0e00 << 16) | (0xc204 >> 2), 131 0x00000000, 132 (0x0e00 << 16) | (0xc2b4 >> 2), 133 0x00000000, 134 (0x0e00 << 16) | (0xc2b8 >> 2), 135 0x00000000, 136 (0x0e00 << 16) | (0xc2bc >> 2), 137 0x00000000, 138 (0x0e00 << 16) | (0xc2c0 >> 2), 139 0x00000000, 140 (0x0e00 << 16) | (0x8228 >> 2), 141 0x00000000, 142 (0x0e00 << 16) | (0x829c >> 2), 143 0x00000000, 144 (0x0e00 << 16) | (0x869c >> 2), 145 0x00000000, 146 (0x0600 << 16) | (0x98f4 >> 2), 147 0x00000000, 148 (0x0e00 << 16) | (0x98f8 >> 2), 149 0x00000000, 150 (0x0e00 << 16) | (0x9900 >> 2), 151 0x00000000, 152 (0x0e00 << 16) | (0xc260 >> 2), 153 0x00000000, 154 (0x0e00 << 16) | (0x90e8 >> 2), 155 0x00000000, 156 (0x0e00 << 16) | (0x3c000 >> 2), 157 0x00000000, 158 (0x0e00 << 16) | (0x3c00c >> 2), 159 0x00000000, 160 (0x0e00 << 16) | (0x8c1c >> 2), 161 0x00000000, 162 (0x0e00 << 16) | (0x9700 >> 2), 163 0x00000000, 164 (0x0e00 << 16) | (0xcd20 >> 2), 165 0x00000000, 166 (0x4e00 << 16) | (0xcd20 >> 2), 167 0x00000000, 168 (0x5e00 << 16) | (0xcd20 >> 2), 169 0x00000000, 170 (0x6e00 << 16) | (0xcd20 >> 2), 171 0x00000000, 172 (0x7e00 << 16) | (0xcd20 >> 2), 173 0x00000000, 174 (0x8e00 << 16) | (0xcd20 >> 2), 175 0x00000000, 176 (0x9e00 << 16) | (0xcd20 >> 2), 177 0x00000000, 178 (0xae00 << 16) | (0xcd20 >> 2), 179 0x00000000, 180 (0xbe00 << 16) | (0xcd20 >> 2), 181 0x00000000, 182 (0x0e00 << 16) | (0x89bc >> 2), 183 0x00000000, 184 (0x0e00 << 16) | (0x8900 >> 2), 185 0x00000000, 186 0x3, 187 (0x0e00 << 16) | (0xc130 >> 2), 188 0x00000000, 189 (0x0e00 << 16) | (0xc134 >> 2), 190 0x00000000, 191 (0x0e00 << 16) | (0xc1fc >> 2), 192 0x00000000, 193 (0x0e00 << 16) | (0xc208 >> 2), 194 0x00000000, 195 (0x0e00 << 16) | (0xc264 >> 2), 196 0x00000000, 197 (0x0e00 << 16) | (0xc268 >> 2), 198 0x00000000, 199 (0x0e00 << 16) | (0xc26c >> 2), 200 0x00000000, 201 (0x0e00 << 16) | (0xc270 >> 2), 202 0x00000000, 203 
(0x0e00 << 16) | (0xc274 >> 2), 204 0x00000000, 205 (0x0e00 << 16) | (0xc278 >> 2), 206 0x00000000, 207 (0x0e00 << 16) | (0xc27c >> 2), 208 0x00000000, 209 (0x0e00 << 16) | (0xc280 >> 2), 210 0x00000000, 211 (0x0e00 << 16) | (0xc284 >> 2), 212 0x00000000, 213 (0x0e00 << 16) | (0xc288 >> 2), 214 0x00000000, 215 (0x0e00 << 16) | (0xc28c >> 2), 216 0x00000000, 217 (0x0e00 << 16) | (0xc290 >> 2), 218 0x00000000, 219 (0x0e00 << 16) | (0xc294 >> 2), 220 0x00000000, 221 (0x0e00 << 16) | (0xc298 >> 2), 222 0x00000000, 223 (0x0e00 << 16) | (0xc29c >> 2), 224 0x00000000, 225 (0x0e00 << 16) | (0xc2a0 >> 2), 226 0x00000000, 227 (0x0e00 << 16) | (0xc2a4 >> 2), 228 0x00000000, 229 (0x0e00 << 16) | (0xc2a8 >> 2), 230 0x00000000, 231 (0x0e00 << 16) | (0xc2ac >> 2), 232 0x00000000, 233 (0x0e00 << 16) | (0xc2b0 >> 2), 234 0x00000000, 235 (0x0e00 << 16) | (0x301d0 >> 2), 236 0x00000000, 237 (0x0e00 << 16) | (0x30238 >> 2), 238 0x00000000, 239 (0x0e00 << 16) | (0x30250 >> 2), 240 0x00000000, 241 (0x0e00 << 16) | (0x30254 >> 2), 242 0x00000000, 243 (0x0e00 << 16) | (0x30258 >> 2), 244 0x00000000, 245 (0x0e00 << 16) | (0x3025c >> 2), 246 0x00000000, 247 (0x4e00 << 16) | (0xc900 >> 2), 248 0x00000000, 249 (0x5e00 << 16) | (0xc900 >> 2), 250 0x00000000, 251 (0x6e00 << 16) | (0xc900 >> 2), 252 0x00000000, 253 (0x7e00 << 16) | (0xc900 >> 2), 254 0x00000000, 255 (0x8e00 << 16) | (0xc900 >> 2), 256 0x00000000, 257 (0x9e00 << 16) | (0xc900 >> 2), 258 0x00000000, 259 (0xae00 << 16) | (0xc900 >> 2), 260 0x00000000, 261 (0xbe00 << 16) | (0xc900 >> 2), 262 0x00000000, 263 (0x4e00 << 16) | (0xc904 >> 2), 264 0x00000000, 265 (0x5e00 << 16) | (0xc904 >> 2), 266 0x00000000, 267 (0x6e00 << 16) | (0xc904 >> 2), 268 0x00000000, 269 (0x7e00 << 16) | (0xc904 >> 2), 270 0x00000000, 271 (0x8e00 << 16) | (0xc904 >> 2), 272 0x00000000, 273 (0x9e00 << 16) | (0xc904 >> 2), 274 0x00000000, 275 (0xae00 << 16) | (0xc904 >> 2), 276 0x00000000, 277 (0xbe00 << 16) | (0xc904 >> 2), 278 0x00000000, 279 (0x4e00 << 16) | (0xc908 >> 2), 280 0x00000000, 281 (0x5e00 << 16) | (0xc908 >> 2), 282 0x00000000, 283 (0x6e00 << 16) | (0xc908 >> 2), 284 0x00000000, 285 (0x7e00 << 16) | (0xc908 >> 2), 286 0x00000000, 287 (0x8e00 << 16) | (0xc908 >> 2), 288 0x00000000, 289 (0x9e00 << 16) | (0xc908 >> 2), 290 0x00000000, 291 (0xae00 << 16) | (0xc908 >> 2), 292 0x00000000, 293 (0xbe00 << 16) | (0xc908 >> 2), 294 0x00000000, 295 (0x4e00 << 16) | (0xc90c >> 2), 296 0x00000000, 297 (0x5e00 << 16) | (0xc90c >> 2), 298 0x00000000, 299 (0x6e00 << 16) | (0xc90c >> 2), 300 0x00000000, 301 (0x7e00 << 16) | (0xc90c >> 2), 302 0x00000000, 303 (0x8e00 << 16) | (0xc90c >> 2), 304 0x00000000, 305 (0x9e00 << 16) | (0xc90c >> 2), 306 0x00000000, 307 (0xae00 << 16) | (0xc90c >> 2), 308 0x00000000, 309 (0xbe00 << 16) | (0xc90c >> 2), 310 0x00000000, 311 (0x4e00 << 16) | (0xc910 >> 2), 312 0x00000000, 313 (0x5e00 << 16) | (0xc910 >> 2), 314 0x00000000, 315 (0x6e00 << 16) | (0xc910 >> 2), 316 0x00000000, 317 (0x7e00 << 16) | (0xc910 >> 2), 318 0x00000000, 319 (0x8e00 << 16) | (0xc910 >> 2), 320 0x00000000, 321 (0x9e00 << 16) | (0xc910 >> 2), 322 0x00000000, 323 (0xae00 << 16) | (0xc910 >> 2), 324 0x00000000, 325 (0xbe00 << 16) | (0xc910 >> 2), 326 0x00000000, 327 (0x0e00 << 16) | (0xc99c >> 2), 328 0x00000000, 329 (0x0e00 << 16) | (0x9834 >> 2), 330 0x00000000, 331 (0x0000 << 16) | (0x30f00 >> 2), 332 0x00000000, 333 (0x0001 << 16) | (0x30f00 >> 2), 334 0x00000000, 335 (0x0000 << 16) | (0x30f04 >> 2), 336 0x00000000, 337 (0x0001 << 16) | (0x30f04 >> 2), 338 0x00000000, 339 (0x0000 
<< 16) | (0x30f08 >> 2), 340 0x00000000, 341 (0x0001 << 16) | (0x30f08 >> 2), 342 0x00000000, 343 (0x0000 << 16) | (0x30f0c >> 2), 344 0x00000000, 345 (0x0001 << 16) | (0x30f0c >> 2), 346 0x00000000, 347 (0x0600 << 16) | (0x9b7c >> 2), 348 0x00000000, 349 (0x0e00 << 16) | (0x8a14 >> 2), 350 0x00000000, 351 (0x0e00 << 16) | (0x8a18 >> 2), 352 0x00000000, 353 (0x0600 << 16) | (0x30a00 >> 2), 354 0x00000000, 355 (0x0e00 << 16) | (0x8bf0 >> 2), 356 0x00000000, 357 (0x0e00 << 16) | (0x8bcc >> 2), 358 0x00000000, 359 (0x0e00 << 16) | (0x8b24 >> 2), 360 0x00000000, 361 (0x0e00 << 16) | (0x30a04 >> 2), 362 0x00000000, 363 (0x0600 << 16) | (0x30a10 >> 2), 364 0x00000000, 365 (0x0600 << 16) | (0x30a14 >> 2), 366 0x00000000, 367 (0x0600 << 16) | (0x30a18 >> 2), 368 0x00000000, 369 (0x0600 << 16) | (0x30a2c >> 2), 370 0x00000000, 371 (0x0e00 << 16) | (0xc700 >> 2), 372 0x00000000, 373 (0x0e00 << 16) | (0xc704 >> 2), 374 0x00000000, 375 (0x0e00 << 16) | (0xc708 >> 2), 376 0x00000000, 377 (0x0e00 << 16) | (0xc768 >> 2), 378 0x00000000, 379 (0x0400 << 16) | (0xc770 >> 2), 380 0x00000000, 381 (0x0400 << 16) | (0xc774 >> 2), 382 0x00000000, 383 (0x0400 << 16) | (0xc778 >> 2), 384 0x00000000, 385 (0x0400 << 16) | (0xc77c >> 2), 386 0x00000000, 387 (0x0400 << 16) | (0xc780 >> 2), 388 0x00000000, 389 (0x0400 << 16) | (0xc784 >> 2), 390 0x00000000, 391 (0x0400 << 16) | (0xc788 >> 2), 392 0x00000000, 393 (0x0400 << 16) | (0xc78c >> 2), 394 0x00000000, 395 (0x0400 << 16) | (0xc798 >> 2), 396 0x00000000, 397 (0x0400 << 16) | (0xc79c >> 2), 398 0x00000000, 399 (0x0400 << 16) | (0xc7a0 >> 2), 400 0x00000000, 401 (0x0400 << 16) | (0xc7a4 >> 2), 402 0x00000000, 403 (0x0400 << 16) | (0xc7a8 >> 2), 404 0x00000000, 405 (0x0400 << 16) | (0xc7ac >> 2), 406 0x00000000, 407 (0x0400 << 16) | (0xc7b0 >> 2), 408 0x00000000, 409 (0x0400 << 16) | (0xc7b4 >> 2), 410 0x00000000, 411 (0x0e00 << 16) | (0x9100 >> 2), 412 0x00000000, 413 (0x0e00 << 16) | (0x3c010 >> 2), 414 0x00000000, 415 (0x0e00 << 16) | (0x92a8 >> 2), 416 0x00000000, 417 (0x0e00 << 16) | (0x92ac >> 2), 418 0x00000000, 419 (0x0e00 << 16) | (0x92b4 >> 2), 420 0x00000000, 421 (0x0e00 << 16) | (0x92b8 >> 2), 422 0x00000000, 423 (0x0e00 << 16) | (0x92bc >> 2), 424 0x00000000, 425 (0x0e00 << 16) | (0x92c0 >> 2), 426 0x00000000, 427 (0x0e00 << 16) | (0x92c4 >> 2), 428 0x00000000, 429 (0x0e00 << 16) | (0x92c8 >> 2), 430 0x00000000, 431 (0x0e00 << 16) | (0x92cc >> 2), 432 0x00000000, 433 (0x0e00 << 16) | (0x92d0 >> 2), 434 0x00000000, 435 (0x0e00 << 16) | (0x8c00 >> 2), 436 0x00000000, 437 (0x0e00 << 16) | (0x8c04 >> 2), 438 0x00000000, 439 (0x0e00 << 16) | (0x8c20 >> 2), 440 0x00000000, 441 (0x0e00 << 16) | (0x8c38 >> 2), 442 0x00000000, 443 (0x0e00 << 16) | (0x8c3c >> 2), 444 0x00000000, 445 (0x0e00 << 16) | (0xae00 >> 2), 446 0x00000000, 447 (0x0e00 << 16) | (0x9604 >> 2), 448 0x00000000, 449 (0x0e00 << 16) | (0xac08 >> 2), 450 0x00000000, 451 (0x0e00 << 16) | (0xac0c >> 2), 452 0x00000000, 453 (0x0e00 << 16) | (0xac10 >> 2), 454 0x00000000, 455 (0x0e00 << 16) | (0xac14 >> 2), 456 0x00000000, 457 (0x0e00 << 16) | (0xac58 >> 2), 458 0x00000000, 459 (0x0e00 << 16) | (0xac68 >> 2), 460 0x00000000, 461 (0x0e00 << 16) | (0xac6c >> 2), 462 0x00000000, 463 (0x0e00 << 16) | (0xac70 >> 2), 464 0x00000000, 465 (0x0e00 << 16) | (0xac74 >> 2), 466 0x00000000, 467 (0x0e00 << 16) | (0xac78 >> 2), 468 0x00000000, 469 (0x0e00 << 16) | (0xac7c >> 2), 470 0x00000000, 471 (0x0e00 << 16) | (0xac80 >> 2), 472 0x00000000, 473 (0x0e00 << 16) | (0xac84 >> 2), 474 0x00000000, 475 (0x0e00 << 16) 
| (0xac88 >> 2), 476 0x00000000, 477 (0x0e00 << 16) | (0xac8c >> 2), 478 0x00000000, 479 (0x0e00 << 16) | (0x970c >> 2), 480 0x00000000, 481 (0x0e00 << 16) | (0x9714 >> 2), 482 0x00000000, 483 (0x0e00 << 16) | (0x9718 >> 2), 484 0x00000000, 485 (0x0e00 << 16) | (0x971c >> 2), 486 0x00000000, 487 (0x0e00 << 16) | (0x31068 >> 2), 488 0x00000000, 489 (0x4e00 << 16) | (0x31068 >> 2), 490 0x00000000, 491 (0x5e00 << 16) | (0x31068 >> 2), 492 0x00000000, 493 (0x6e00 << 16) | (0x31068 >> 2), 494 0x00000000, 495 (0x7e00 << 16) | (0x31068 >> 2), 496 0x00000000, 497 (0x8e00 << 16) | (0x31068 >> 2), 498 0x00000000, 499 (0x9e00 << 16) | (0x31068 >> 2), 500 0x00000000, 501 (0xae00 << 16) | (0x31068 >> 2), 502 0x00000000, 503 (0xbe00 << 16) | (0x31068 >> 2), 504 0x00000000, 505 (0x0e00 << 16) | (0xcd10 >> 2), 506 0x00000000, 507 (0x0e00 << 16) | (0xcd14 >> 2), 508 0x00000000, 509 (0x0e00 << 16) | (0x88b0 >> 2), 510 0x00000000, 511 (0x0e00 << 16) | (0x88b4 >> 2), 512 0x00000000, 513 (0x0e00 << 16) | (0x88b8 >> 2), 514 0x00000000, 515 (0x0e00 << 16) | (0x88bc >> 2), 516 0x00000000, 517 (0x0400 << 16) | (0x89c0 >> 2), 518 0x00000000, 519 (0x0e00 << 16) | (0x88c4 >> 2), 520 0x00000000, 521 (0x0e00 << 16) | (0x88c8 >> 2), 522 0x00000000, 523 (0x0e00 << 16) | (0x88d0 >> 2), 524 0x00000000, 525 (0x0e00 << 16) | (0x88d4 >> 2), 526 0x00000000, 527 (0x0e00 << 16) | (0x88d8 >> 2), 528 0x00000000, 529 (0x0e00 << 16) | (0x8980 >> 2), 530 0x00000000, 531 (0x0e00 << 16) | (0x30938 >> 2), 532 0x00000000, 533 (0x0e00 << 16) | (0x3093c >> 2), 534 0x00000000, 535 (0x0e00 << 16) | (0x30940 >> 2), 536 0x00000000, 537 (0x0e00 << 16) | (0x89a0 >> 2), 538 0x00000000, 539 (0x0e00 << 16) | (0x30900 >> 2), 540 0x00000000, 541 (0x0e00 << 16) | (0x30904 >> 2), 542 0x00000000, 543 (0x0e00 << 16) | (0x89b4 >> 2), 544 0x00000000, 545 (0x0e00 << 16) | (0x3c210 >> 2), 546 0x00000000, 547 (0x0e00 << 16) | (0x3c214 >> 2), 548 0x00000000, 549 (0x0e00 << 16) | (0x3c218 >> 2), 550 0x00000000, 551 (0x0e00 << 16) | (0x8904 >> 2), 552 0x00000000, 553 0x5, 554 (0x0e00 << 16) | (0x8c28 >> 2), 555 (0x0e00 << 16) | (0x8c2c >> 2), 556 (0x0e00 << 16) | (0x8c30 >> 2), 557 (0x0e00 << 16) | (0x8c34 >> 2), 558 (0x0e00 << 16) | (0x9600 >> 2), 559 }; 560 561 static const u32 kalindi_rlc_save_restore_register_list[] = { 562 (0x0e00 << 16) | (0xc12c >> 2), 563 0x00000000, 564 (0x0e00 << 16) | (0xc140 >> 2), 565 0x00000000, 566 (0x0e00 << 16) | (0xc150 >> 2), 567 0x00000000, 568 (0x0e00 << 16) | (0xc15c >> 2), 569 0x00000000, 570 (0x0e00 << 16) | (0xc168 >> 2), 571 0x00000000, 572 (0x0e00 << 16) | (0xc170 >> 2), 573 0x00000000, 574 (0x0e00 << 16) | (0xc204 >> 2), 575 0x00000000, 576 (0x0e00 << 16) | (0xc2b4 >> 2), 577 0x00000000, 578 (0x0e00 << 16) | (0xc2b8 >> 2), 579 0x00000000, 580 (0x0e00 << 16) | (0xc2bc >> 2), 581 0x00000000, 582 (0x0e00 << 16) | (0xc2c0 >> 2), 583 0x00000000, 584 (0x0e00 << 16) | (0x8228 >> 2), 585 0x00000000, 586 (0x0e00 << 16) | (0x829c >> 2), 587 0x00000000, 588 (0x0e00 << 16) | (0x869c >> 2), 589 0x00000000, 590 (0x0600 << 16) | (0x98f4 >> 2), 591 0x00000000, 592 (0x0e00 << 16) | (0x98f8 >> 2), 593 0x00000000, 594 (0x0e00 << 16) | (0x9900 >> 2), 595 0x00000000, 596 (0x0e00 << 16) | (0xc260 >> 2), 597 0x00000000, 598 (0x0e00 << 16) | (0x90e8 >> 2), 599 0x00000000, 600 (0x0e00 << 16) | (0x3c000 >> 2), 601 0x00000000, 602 (0x0e00 << 16) | (0x3c00c >> 2), 603 0x00000000, 604 (0x0e00 << 16) | (0x8c1c >> 2), 605 0x00000000, 606 (0x0e00 << 16) | (0x9700 >> 2), 607 0x00000000, 608 (0x0e00 << 16) | (0xcd20 >> 2), 609 0x00000000, 610 (0x4e00 
<< 16) | (0xcd20 >> 2), 611 0x00000000, 612 (0x5e00 << 16) | (0xcd20 >> 2), 613 0x00000000, 614 (0x6e00 << 16) | (0xcd20 >> 2), 615 0x00000000, 616 (0x7e00 << 16) | (0xcd20 >> 2), 617 0x00000000, 618 (0x0e00 << 16) | (0x89bc >> 2), 619 0x00000000, 620 (0x0e00 << 16) | (0x8900 >> 2), 621 0x00000000, 622 0x3, 623 (0x0e00 << 16) | (0xc130 >> 2), 624 0x00000000, 625 (0x0e00 << 16) | (0xc134 >> 2), 626 0x00000000, 627 (0x0e00 << 16) | (0xc1fc >> 2), 628 0x00000000, 629 (0x0e00 << 16) | (0xc208 >> 2), 630 0x00000000, 631 (0x0e00 << 16) | (0xc264 >> 2), 632 0x00000000, 633 (0x0e00 << 16) | (0xc268 >> 2), 634 0x00000000, 635 (0x0e00 << 16) | (0xc26c >> 2), 636 0x00000000, 637 (0x0e00 << 16) | (0xc270 >> 2), 638 0x00000000, 639 (0x0e00 << 16) | (0xc274 >> 2), 640 0x00000000, 641 (0x0e00 << 16) | (0xc28c >> 2), 642 0x00000000, 643 (0x0e00 << 16) | (0xc290 >> 2), 644 0x00000000, 645 (0x0e00 << 16) | (0xc294 >> 2), 646 0x00000000, 647 (0x0e00 << 16) | (0xc298 >> 2), 648 0x00000000, 649 (0x0e00 << 16) | (0xc2a0 >> 2), 650 0x00000000, 651 (0x0e00 << 16) | (0xc2a4 >> 2), 652 0x00000000, 653 (0x0e00 << 16) | (0xc2a8 >> 2), 654 0x00000000, 655 (0x0e00 << 16) | (0xc2ac >> 2), 656 0x00000000, 657 (0x0e00 << 16) | (0x301d0 >> 2), 658 0x00000000, 659 (0x0e00 << 16) | (0x30238 >> 2), 660 0x00000000, 661 (0x0e00 << 16) | (0x30250 >> 2), 662 0x00000000, 663 (0x0e00 << 16) | (0x30254 >> 2), 664 0x00000000, 665 (0x0e00 << 16) | (0x30258 >> 2), 666 0x00000000, 667 (0x0e00 << 16) | (0x3025c >> 2), 668 0x00000000, 669 (0x4e00 << 16) | (0xc900 >> 2), 670 0x00000000, 671 (0x5e00 << 16) | (0xc900 >> 2), 672 0x00000000, 673 (0x6e00 << 16) | (0xc900 >> 2), 674 0x00000000, 675 (0x7e00 << 16) | (0xc900 >> 2), 676 0x00000000, 677 (0x4e00 << 16) | (0xc904 >> 2), 678 0x00000000, 679 (0x5e00 << 16) | (0xc904 >> 2), 680 0x00000000, 681 (0x6e00 << 16) | (0xc904 >> 2), 682 0x00000000, 683 (0x7e00 << 16) | (0xc904 >> 2), 684 0x00000000, 685 (0x4e00 << 16) | (0xc908 >> 2), 686 0x00000000, 687 (0x5e00 << 16) | (0xc908 >> 2), 688 0x00000000, 689 (0x6e00 << 16) | (0xc908 >> 2), 690 0x00000000, 691 (0x7e00 << 16) | (0xc908 >> 2), 692 0x00000000, 693 (0x4e00 << 16) | (0xc90c >> 2), 694 0x00000000, 695 (0x5e00 << 16) | (0xc90c >> 2), 696 0x00000000, 697 (0x6e00 << 16) | (0xc90c >> 2), 698 0x00000000, 699 (0x7e00 << 16) | (0xc90c >> 2), 700 0x00000000, 701 (0x4e00 << 16) | (0xc910 >> 2), 702 0x00000000, 703 (0x5e00 << 16) | (0xc910 >> 2), 704 0x00000000, 705 (0x6e00 << 16) | (0xc910 >> 2), 706 0x00000000, 707 (0x7e00 << 16) | (0xc910 >> 2), 708 0x00000000, 709 (0x0e00 << 16) | (0xc99c >> 2), 710 0x00000000, 711 (0x0e00 << 16) | (0x9834 >> 2), 712 0x00000000, 713 (0x0000 << 16) | (0x30f00 >> 2), 714 0x00000000, 715 (0x0000 << 16) | (0x30f04 >> 2), 716 0x00000000, 717 (0x0000 << 16) | (0x30f08 >> 2), 718 0x00000000, 719 (0x0000 << 16) | (0x30f0c >> 2), 720 0x00000000, 721 (0x0600 << 16) | (0x9b7c >> 2), 722 0x00000000, 723 (0x0e00 << 16) | (0x8a14 >> 2), 724 0x00000000, 725 (0x0e00 << 16) | (0x8a18 >> 2), 726 0x00000000, 727 (0x0600 << 16) | (0x30a00 >> 2), 728 0x00000000, 729 (0x0e00 << 16) | (0x8bf0 >> 2), 730 0x00000000, 731 (0x0e00 << 16) | (0x8bcc >> 2), 732 0x00000000, 733 (0x0e00 << 16) | (0x8b24 >> 2), 734 0x00000000, 735 (0x0e00 << 16) | (0x30a04 >> 2), 736 0x00000000, 737 (0x0600 << 16) | (0x30a10 >> 2), 738 0x00000000, 739 (0x0600 << 16) | (0x30a14 >> 2), 740 0x00000000, 741 (0x0600 << 16) | (0x30a18 >> 2), 742 0x00000000, 743 (0x0600 << 16) | (0x30a2c >> 2), 744 0x00000000, 745 (0x0e00 << 16) | (0xc700 >> 2), 746 0x00000000, 747 
(0x0e00 << 16) | (0xc704 >> 2), 748 0x00000000, 749 (0x0e00 << 16) | (0xc708 >> 2), 750 0x00000000, 751 (0x0e00 << 16) | (0xc768 >> 2), 752 0x00000000, 753 (0x0400 << 16) | (0xc770 >> 2), 754 0x00000000, 755 (0x0400 << 16) | (0xc774 >> 2), 756 0x00000000, 757 (0x0400 << 16) | (0xc798 >> 2), 758 0x00000000, 759 (0x0400 << 16) | (0xc79c >> 2), 760 0x00000000, 761 (0x0e00 << 16) | (0x9100 >> 2), 762 0x00000000, 763 (0x0e00 << 16) | (0x3c010 >> 2), 764 0x00000000, 765 (0x0e00 << 16) | (0x8c00 >> 2), 766 0x00000000, 767 (0x0e00 << 16) | (0x8c04 >> 2), 768 0x00000000, 769 (0x0e00 << 16) | (0x8c20 >> 2), 770 0x00000000, 771 (0x0e00 << 16) | (0x8c38 >> 2), 772 0x00000000, 773 (0x0e00 << 16) | (0x8c3c >> 2), 774 0x00000000, 775 (0x0e00 << 16) | (0xae00 >> 2), 776 0x00000000, 777 (0x0e00 << 16) | (0x9604 >> 2), 778 0x00000000, 779 (0x0e00 << 16) | (0xac08 >> 2), 780 0x00000000, 781 (0x0e00 << 16) | (0xac0c >> 2), 782 0x00000000, 783 (0x0e00 << 16) | (0xac10 >> 2), 784 0x00000000, 785 (0x0e00 << 16) | (0xac14 >> 2), 786 0x00000000, 787 (0x0e00 << 16) | (0xac58 >> 2), 788 0x00000000, 789 (0x0e00 << 16) | (0xac68 >> 2), 790 0x00000000, 791 (0x0e00 << 16) | (0xac6c >> 2), 792 0x00000000, 793 (0x0e00 << 16) | (0xac70 >> 2), 794 0x00000000, 795 (0x0e00 << 16) | (0xac74 >> 2), 796 0x00000000, 797 (0x0e00 << 16) | (0xac78 >> 2), 798 0x00000000, 799 (0x0e00 << 16) | (0xac7c >> 2), 800 0x00000000, 801 (0x0e00 << 16) | (0xac80 >> 2), 802 0x00000000, 803 (0x0e00 << 16) | (0xac84 >> 2), 804 0x00000000, 805 (0x0e00 << 16) | (0xac88 >> 2), 806 0x00000000, 807 (0x0e00 << 16) | (0xac8c >> 2), 808 0x00000000, 809 (0x0e00 << 16) | (0x970c >> 2), 810 0x00000000, 811 (0x0e00 << 16) | (0x9714 >> 2), 812 0x00000000, 813 (0x0e00 << 16) | (0x9718 >> 2), 814 0x00000000, 815 (0x0e00 << 16) | (0x971c >> 2), 816 0x00000000, 817 (0x0e00 << 16) | (0x31068 >> 2), 818 0x00000000, 819 (0x4e00 << 16) | (0x31068 >> 2), 820 0x00000000, 821 (0x5e00 << 16) | (0x31068 >> 2), 822 0x00000000, 823 (0x6e00 << 16) | (0x31068 >> 2), 824 0x00000000, 825 (0x7e00 << 16) | (0x31068 >> 2), 826 0x00000000, 827 (0x0e00 << 16) | (0xcd10 >> 2), 828 0x00000000, 829 (0x0e00 << 16) | (0xcd14 >> 2), 830 0x00000000, 831 (0x0e00 << 16) | (0x88b0 >> 2), 832 0x00000000, 833 (0x0e00 << 16) | (0x88b4 >> 2), 834 0x00000000, 835 (0x0e00 << 16) | (0x88b8 >> 2), 836 0x00000000, 837 (0x0e00 << 16) | (0x88bc >> 2), 838 0x00000000, 839 (0x0400 << 16) | (0x89c0 >> 2), 840 0x00000000, 841 (0x0e00 << 16) | (0x88c4 >> 2), 842 0x00000000, 843 (0x0e00 << 16) | (0x88c8 >> 2), 844 0x00000000, 845 (0x0e00 << 16) | (0x88d0 >> 2), 846 0x00000000, 847 (0x0e00 << 16) | (0x88d4 >> 2), 848 0x00000000, 849 (0x0e00 << 16) | (0x88d8 >> 2), 850 0x00000000, 851 (0x0e00 << 16) | (0x8980 >> 2), 852 0x00000000, 853 (0x0e00 << 16) | (0x30938 >> 2), 854 0x00000000, 855 (0x0e00 << 16) | (0x3093c >> 2), 856 0x00000000, 857 (0x0e00 << 16) | (0x30940 >> 2), 858 0x00000000, 859 (0x0e00 << 16) | (0x89a0 >> 2), 860 0x00000000, 861 (0x0e00 << 16) | (0x30900 >> 2), 862 0x00000000, 863 (0x0e00 << 16) | (0x30904 >> 2), 864 0x00000000, 865 (0x0e00 << 16) | (0x89b4 >> 2), 866 0x00000000, 867 (0x0e00 << 16) | (0x3e1fc >> 2), 868 0x00000000, 869 (0x0e00 << 16) | (0x3c210 >> 2), 870 0x00000000, 871 (0x0e00 << 16) | (0x3c214 >> 2), 872 0x00000000, 873 (0x0e00 << 16) | (0x3c218 >> 2), 874 0x00000000, 875 (0x0e00 << 16) | (0x8904 >> 2), 876 0x00000000, 877 0x5, 878 (0x0e00 << 16) | (0x8c28 >> 2), 879 (0x0e00 << 16) | (0x8c2c >> 2), 880 (0x0e00 << 16) | (0x8c30 >> 2), 881 (0x0e00 << 16) | (0x8c34 >> 2), 882 
(0x0e00 << 16) | (0x9600 >> 2), 883 }; 884 885 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); 886 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); 887 static void gfx_v7_0_init_pg(struct amdgpu_device *adev); 888 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); 889 890 static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) 891 { 892 amdgpu_ucode_release(&adev->gfx.pfp_fw); 893 amdgpu_ucode_release(&adev->gfx.me_fw); 894 amdgpu_ucode_release(&adev->gfx.ce_fw); 895 amdgpu_ucode_release(&adev->gfx.mec_fw); 896 amdgpu_ucode_release(&adev->gfx.mec2_fw); 897 amdgpu_ucode_release(&adev->gfx.rlc_fw); 898 } 899 900 /* 901 * Core functions 902 */ 903 /** 904 * gfx_v7_0_init_microcode - load ucode images from disk 905 * 906 * @adev: amdgpu_device pointer 907 * 908 * Use the firmware interface to load the ucode images into 909 * the driver (not loaded into hw). 910 * Returns 0 on success, error on failure. 911 */ 912 static int gfx_v7_0_init_microcode(struct amdgpu_device *adev) 913 { 914 const char *chip_name; 915 int err; 916 917 DRM_DEBUG("\n"); 918 919 switch (adev->asic_type) { 920 case CHIP_BONAIRE: 921 chip_name = "bonaire"; 922 break; 923 case CHIP_HAWAII: 924 chip_name = "hawaii"; 925 break; 926 case CHIP_KAVERI: 927 chip_name = "kaveri"; 928 break; 929 case CHIP_KABINI: 930 chip_name = "kabini"; 931 break; 932 case CHIP_MULLINS: 933 chip_name = "mullins"; 934 break; 935 default: 936 BUG(); 937 } 938 939 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, 940 AMDGPU_UCODE_REQUIRED, 941 "amdgpu/%s_pfp.bin", chip_name); 942 if (err) 943 goto out; 944 945 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, 946 AMDGPU_UCODE_REQUIRED, 947 "amdgpu/%s_me.bin", chip_name); 948 if (err) 949 goto out; 950 951 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, 952 AMDGPU_UCODE_REQUIRED, 953 "amdgpu/%s_ce.bin", chip_name); 954 if (err) 955 goto out; 956 957 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, 958 AMDGPU_UCODE_REQUIRED, 959 "amdgpu/%s_mec.bin", chip_name); 960 if (err) 961 goto out; 962 963 if (adev->asic_type == CHIP_KAVERI) { 964 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, 965 AMDGPU_UCODE_REQUIRED, 966 "amdgpu/%s_mec2.bin", chip_name); 967 if (err) 968 goto out; 969 } 970 971 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, 972 AMDGPU_UCODE_REQUIRED, 973 "amdgpu/%s_rlc.bin", chip_name); 974 out: 975 if (err) { 976 pr_err("gfx7: Failed to load firmware %s gfx firmware\n", chip_name); 977 gfx_v7_0_free_microcode(adev); 978 } 979 return err; 980 } 981 982 /** 983 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table 984 * 985 * @adev: amdgpu_device pointer 986 * 987 * Starting with SI, the tiling setup is done globally in a 988 * set of 32 tiling modes. Rather than selecting each set of 989 * parameters per surface as on older asics, we just select 990 * which index in the tiling table we want to use, and the 991 * surface uses those parameters (CIK). 
992 */ 993 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) 994 { 995 const u32 num_tile_mode_states = 996 ARRAY_SIZE(adev->gfx.config.tile_mode_array); 997 const u32 num_secondary_tile_mode_states = 998 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 999 u32 reg_offset, split_equal_to_row_size; 1000 uint32_t *tile, *macrotile; 1001 1002 tile = adev->gfx.config.tile_mode_array; 1003 macrotile = adev->gfx.config.macrotile_mode_array; 1004 1005 switch (adev->gfx.config.mem_row_size_in_kb) { 1006 case 1: 1007 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; 1008 break; 1009 case 2: 1010 default: 1011 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; 1012 break; 1013 case 4: 1014 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; 1015 break; 1016 } 1017 1018 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1019 tile[reg_offset] = 0; 1020 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1021 macrotile[reg_offset] = 0; 1022 1023 switch (adev->asic_type) { 1024 case CHIP_BONAIRE: 1025 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1026 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1027 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1028 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1029 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1030 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1031 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1032 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1033 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1034 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1035 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1036 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1037 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1038 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1039 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1040 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1041 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1042 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1043 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1044 TILE_SPLIT(split_equal_to_row_size)); 1045 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1046 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1047 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1048 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1049 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1050 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1051 TILE_SPLIT(split_equal_to_row_size)); 1052 tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1053 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1054 PIPE_CONFIG(ADDR_SURF_P4_16x16)); 1055 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1056 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1057 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1058 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1059 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1060 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1061 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1062 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1063 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1064 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1065 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1066 tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1067 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1068 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1069 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1070 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1071 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1072 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1073 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1074 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1075 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1076 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1077 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1078 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1079 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1080 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1081 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1082 tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1083 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1084 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1085 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1086 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1087 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1088 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1089 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1090 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1091 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1092 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1093 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1094 tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1095 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1096 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1097 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1098 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1099 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1100 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1101 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1102 tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1103 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1104 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1105 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1106 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1107 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1108 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1109 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1110 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1111 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1112 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1113 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1114 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1115 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1116 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1117 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1118 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1119 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1120 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1121 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1122 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1123 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1124 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1125 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1126 tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1127 1128 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1131 NUM_BANKS(ADDR_SURF_16_BANK)); 1132 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1133 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1134 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1135 NUM_BANKS(ADDR_SURF_16_BANK)); 1136 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1137 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1138 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1139 NUM_BANKS(ADDR_SURF_16_BANK)); 1140 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1143 NUM_BANKS(ADDR_SURF_16_BANK)); 1144 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1145 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1146 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1147 NUM_BANKS(ADDR_SURF_16_BANK)); 1148 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1149 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 
1150 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1151 NUM_BANKS(ADDR_SURF_8_BANK)); 1152 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1153 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1154 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1155 NUM_BANKS(ADDR_SURF_4_BANK)); 1156 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1157 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1158 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1159 NUM_BANKS(ADDR_SURF_16_BANK)); 1160 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1161 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1162 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1163 NUM_BANKS(ADDR_SURF_16_BANK)); 1164 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1165 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1166 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1167 NUM_BANKS(ADDR_SURF_16_BANK)); 1168 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1169 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1170 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1171 NUM_BANKS(ADDR_SURF_16_BANK)); 1172 macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1173 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1174 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1175 NUM_BANKS(ADDR_SURF_16_BANK)); 1176 macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1177 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1178 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1179 NUM_BANKS(ADDR_SURF_8_BANK)); 1180 macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1181 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1182 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1183 NUM_BANKS(ADDR_SURF_4_BANK)); 1184 1185 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1186 WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1187 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1188 if (reg_offset != 7) 1189 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1190 break; 1191 case CHIP_HAWAII: 1192 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1193 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1194 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1195 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1196 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1197 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1198 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1199 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1200 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1201 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1202 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1203 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1204 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1205 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1206 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1207 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1208 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1209 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1210 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1211 TILE_SPLIT(split_equal_to_row_size)); 1212 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1213 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1214 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1215 TILE_SPLIT(split_equal_to_row_size)); 1216 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1217 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1218 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1219 TILE_SPLIT(split_equal_to_row_size)); 1220 tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1221 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1222 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1223 TILE_SPLIT(split_equal_to_row_size)); 1224 tile[8] = 
(ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1225 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1226 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1227 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1228 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1229 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1230 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1231 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1232 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1233 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1234 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1235 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1236 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1237 tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | 1238 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1239 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1240 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1241 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1242 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1243 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1244 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1245 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1246 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1247 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1248 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1249 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1250 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1251 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1252 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1253 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1254 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1255 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1256 tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1257 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1258 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1259 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1260 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1261 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1262 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1263 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1264 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1265 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1266 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1267 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1268 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1269 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1270 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1271 tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1272 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1273 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1274 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1275 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1276 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1277 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1278 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1279 tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1280 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1281 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1282 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1283 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1284 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1285 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1286 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1287 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1288 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1289 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1290 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1291 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1292 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1293 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1294 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 
1295 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1296 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1297 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1298 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1299 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1300 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1301 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1302 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1303 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1304 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1305 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1306 tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1307 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1308 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1309 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1310 1311 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1312 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1313 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1314 NUM_BANKS(ADDR_SURF_16_BANK)); 1315 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1316 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1317 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1318 NUM_BANKS(ADDR_SURF_16_BANK)); 1319 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1320 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1321 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1322 NUM_BANKS(ADDR_SURF_16_BANK)); 1323 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1324 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1325 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1326 NUM_BANKS(ADDR_SURF_16_BANK)); 1327 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1328 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1329 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1330 NUM_BANKS(ADDR_SURF_8_BANK)); 1331 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1332 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1333 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1334 NUM_BANKS(ADDR_SURF_4_BANK)); 1335 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1336 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1337 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1338 NUM_BANKS(ADDR_SURF_4_BANK)); 1339 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1340 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1341 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1342 NUM_BANKS(ADDR_SURF_16_BANK)); 1343 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1344 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1345 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1346 NUM_BANKS(ADDR_SURF_16_BANK)); 1347 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1348 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1349 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1350 NUM_BANKS(ADDR_SURF_16_BANK)); 1351 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1352 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1353 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1354 NUM_BANKS(ADDR_SURF_8_BANK)); 1355 macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1356 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1357 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1358 NUM_BANKS(ADDR_SURF_16_BANK)); 1359 macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1360 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1361 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1362 NUM_BANKS(ADDR_SURF_8_BANK)); 1363 macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1364 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1365 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1366 NUM_BANKS(ADDR_SURF_4_BANK)); 1367 1368 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1369 WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 
1370 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1371 if (reg_offset != 7) 1372 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1373 break; 1374 case CHIP_KABINI: 1375 case CHIP_KAVERI: 1376 case CHIP_MULLINS: 1377 default: 1378 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1379 PIPE_CONFIG(ADDR_SURF_P2) | 1380 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1381 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1382 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1383 PIPE_CONFIG(ADDR_SURF_P2) | 1384 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1385 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1386 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1387 PIPE_CONFIG(ADDR_SURF_P2) | 1388 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1389 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1390 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1391 PIPE_CONFIG(ADDR_SURF_P2) | 1392 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1393 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1394 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1395 PIPE_CONFIG(ADDR_SURF_P2) | 1396 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1397 TILE_SPLIT(split_equal_to_row_size)); 1398 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1399 PIPE_CONFIG(ADDR_SURF_P2) | 1400 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1401 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1402 PIPE_CONFIG(ADDR_SURF_P2) | 1403 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1404 TILE_SPLIT(split_equal_to_row_size)); 1405 tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1406 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1407 PIPE_CONFIG(ADDR_SURF_P2)); 1408 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1409 PIPE_CONFIG(ADDR_SURF_P2) | 1410 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1411 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1412 PIPE_CONFIG(ADDR_SURF_P2) | 1413 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1414 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1415 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1416 PIPE_CONFIG(ADDR_SURF_P2) | 1417 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1418 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1419 tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1420 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1421 PIPE_CONFIG(ADDR_SURF_P2) | 1422 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1423 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1424 PIPE_CONFIG(ADDR_SURF_P2) | 1425 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1426 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1427 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1428 PIPE_CONFIG(ADDR_SURF_P2) | 1429 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1430 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1431 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1432 PIPE_CONFIG(ADDR_SURF_P2) | 1433 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1434 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1435 tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1436 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1437 PIPE_CONFIG(ADDR_SURF_P2) | 1438 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1439 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1440 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1441 PIPE_CONFIG(ADDR_SURF_P2) | 1442 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1443 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1444 PIPE_CONFIG(ADDR_SURF_P2) | 1445 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1446 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1447 tile[21] = 
(ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1448 PIPE_CONFIG(ADDR_SURF_P2) | 1449 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1450 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1451 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1452 PIPE_CONFIG(ADDR_SURF_P2) | 1453 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1454 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1455 tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1456 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1457 PIPE_CONFIG(ADDR_SURF_P2) | 1458 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1459 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1460 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1461 PIPE_CONFIG(ADDR_SURF_P2) | 1462 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1463 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1464 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1465 PIPE_CONFIG(ADDR_SURF_P2) | 1466 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1467 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1468 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1469 PIPE_CONFIG(ADDR_SURF_P2) | 1470 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1471 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1472 PIPE_CONFIG(ADDR_SURF_P2) | 1473 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1474 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1475 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1476 PIPE_CONFIG(ADDR_SURF_P2) | 1477 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1478 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1479 tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1480 1481 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1482 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1483 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1484 NUM_BANKS(ADDR_SURF_8_BANK)); 1485 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1486 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1487 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1488 NUM_BANKS(ADDR_SURF_8_BANK)); 1489 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1490 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1491 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1492 NUM_BANKS(ADDR_SURF_8_BANK)); 1493 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1494 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1495 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1496 NUM_BANKS(ADDR_SURF_8_BANK)); 1497 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1498 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1499 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1500 NUM_BANKS(ADDR_SURF_8_BANK)); 1501 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1502 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1503 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1504 NUM_BANKS(ADDR_SURF_8_BANK)); 1505 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1506 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1507 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1508 NUM_BANKS(ADDR_SURF_8_BANK)); 1509 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1510 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1511 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1512 NUM_BANKS(ADDR_SURF_16_BANK)); 1513 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1514 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1515 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1516 NUM_BANKS(ADDR_SURF_16_BANK)); 1517 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1518 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1519 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1520 NUM_BANKS(ADDR_SURF_16_BANK)); 1521 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1522 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 
1523 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1524 NUM_BANKS(ADDR_SURF_16_BANK)); 1525 macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1526 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1527 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1528 NUM_BANKS(ADDR_SURF_16_BANK)); 1529 macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1530 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1531 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1532 NUM_BANKS(ADDR_SURF_16_BANK)); 1533 macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1534 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1535 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1536 NUM_BANKS(ADDR_SURF_8_BANK)); 1537 1538 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1539 WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1540 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1541 if (reg_offset != 7) 1542 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1543 break; 1544 } 1545 } 1546 1547 /** 1548 * gfx_v7_0_select_se_sh - select which SE, SH to address 1549 * 1550 * @adev: amdgpu_device pointer 1551 * @se_num: shader engine to address 1552 * @sh_num: sh block to address 1553 * @instance: Certain registers are instanced per SE or SH. 1554 * 0xffffffff means broadcast to all SEs or SHs (CIK). 1555 * @xcc_id: xcc accelerated compute core id 1556 * Select which SE, SH combinations to address. 1557 */ 1558 static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, 1559 u32 se_num, u32 sh_num, u32 instance, 1560 int xcc_id) 1561 { 1562 u32 data; 1563 1564 if (instance == 0xffffffff) 1565 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1566 else 1567 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); 1568 1569 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) 1570 data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | 1571 GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK; 1572 else if (se_num == 0xffffffff) 1573 data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK | 1574 (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT); 1575 else if (sh_num == 0xffffffff) 1576 data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | 1577 (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 1578 else 1579 data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) | 1580 (se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT); 1581 WREG32(mmGRBM_GFX_INDEX, data); 1582 } 1583 1584 /** 1585 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs 1586 * 1587 * @adev: amdgpu_device pointer 1588 * 1589 * Calculates the bitmask of enabled RBs (CIK). 1590 * Returns the enabled RB bitmask. 
1591 */ 1592 static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev) 1593 { 1594 u32 data, mask; 1595 1596 data = RREG32(mmCC_RB_BACKEND_DISABLE); 1597 data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1598 1599 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; 1600 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; 1601 1602 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / 1603 adev->gfx.config.max_sh_per_se); 1604 1605 return (~data) & mask; 1606 } 1607 1608 static void 1609 gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) 1610 { 1611 switch (adev->asic_type) { 1612 case CHIP_BONAIRE: 1613 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) | 1614 SE_XSEL(1) | SE_YSEL(1); 1615 *rconf1 |= 0x0; 1616 break; 1617 case CHIP_HAWAII: 1618 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | 1619 RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) | 1620 PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) | 1621 SE_YSEL(3); 1622 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) | 1623 SE_PAIR_YSEL(2); 1624 break; 1625 case CHIP_KAVERI: 1626 *rconf |= RB_MAP_PKR0(2); 1627 *rconf1 |= 0x0; 1628 break; 1629 case CHIP_KABINI: 1630 case CHIP_MULLINS: 1631 *rconf |= 0x0; 1632 *rconf1 |= 0x0; 1633 break; 1634 default: 1635 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type); 1636 break; 1637 } 1638 } 1639 1640 static void 1641 gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev, 1642 u32 raster_config, u32 raster_config_1, 1643 unsigned rb_mask, unsigned num_rb) 1644 { 1645 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1); 1646 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1); 1647 unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2); 1648 unsigned rb_per_se = num_rb / num_se; 1649 unsigned se_mask[4]; 1650 unsigned se; 1651 1652 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask; 1653 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask; 1654 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask; 1655 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask; 1656 1657 WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4)); 1658 WARN_ON(!(sh_per_se == 1 || sh_per_se == 2)); 1659 WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2)); 1660 1661 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) || 1662 (!se_mask[2] && !se_mask[3]))) { 1663 raster_config_1 &= ~SE_PAIR_MAP_MASK; 1664 1665 if (!se_mask[0] && !se_mask[1]) { 1666 raster_config_1 |= 1667 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3); 1668 } else { 1669 raster_config_1 |= 1670 SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0); 1671 } 1672 } 1673 1674 for (se = 0; se < num_se; se++) { 1675 unsigned raster_config_se = raster_config; 1676 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se); 1677 unsigned pkr1_mask = pkr0_mask << rb_per_pkr; 1678 int idx = (se / 2) * 2; 1679 1680 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) { 1681 raster_config_se &= ~SE_MAP_MASK; 1682 1683 if (!se_mask[idx]) { 1684 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3); 1685 } else { 1686 raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0); 1687 } 1688 } 1689 1690 pkr0_mask &= rb_mask; 1691 pkr1_mask &= rb_mask; 1692 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) { 1693 raster_config_se &= ~PKR_MAP_MASK; 1694 1695 if (!pkr0_mask) { 1696 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3); 1697 } else { 1698 raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0); 1699 } 1700 } 1701 1702 if (rb_per_se >= 2) { 1703 unsigned rb0_mask = 1 << (se * rb_per_se); 1704 unsigned rb1_mask = 
rb0_mask << 1; 1705 1706 rb0_mask &= rb_mask; 1707 rb1_mask &= rb_mask; 1708 if (!rb0_mask || !rb1_mask) { 1709 raster_config_se &= ~RB_MAP_PKR0_MASK; 1710 1711 if (!rb0_mask) { 1712 raster_config_se |= 1713 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); 1714 } else { 1715 raster_config_se |= 1716 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); 1717 } 1718 } 1719 1720 if (rb_per_se > 2) { 1721 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); 1722 rb1_mask = rb0_mask << 1; 1723 rb0_mask &= rb_mask; 1724 rb1_mask &= rb_mask; 1725 if (!rb0_mask || !rb1_mask) { 1726 raster_config_se &= ~RB_MAP_PKR1_MASK; 1727 1728 if (!rb0_mask) { 1729 raster_config_se |= 1730 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); 1731 } else { 1732 raster_config_se |= 1733 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); 1734 } 1735 } 1736 } 1737 } 1738 1739 /* GRBM_GFX_INDEX has a different offset on CI+ */ 1740 gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); 1741 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); 1742 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); 1743 } 1744 1745 /* GRBM_GFX_INDEX has a different offset on CI+ */ 1746 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1747 } 1748 1749 /** 1750 * gfx_v7_0_setup_rb - setup the RBs on the asic 1751 * 1752 * @adev: amdgpu_device pointer 1753 * 1754 * Configures per-SE/SH RB registers (CIK). 1755 */ 1756 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) 1757 { 1758 int i, j; 1759 u32 data; 1760 u32 raster_config = 0, raster_config_1 = 0; 1761 u32 active_rbs = 0; 1762 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1763 adev->gfx.config.max_sh_per_se; 1764 unsigned num_rb_pipes; 1765 1766 mutex_lock(&adev->grbm_idx_mutex); 1767 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1768 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1769 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 1770 data = gfx_v7_0_get_rb_active_bitmap(adev); 1771 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1772 rb_bitmap_width_per_sh); 1773 } 1774 } 1775 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1776 1777 adev->gfx.config.backend_enable_mask = active_rbs; 1778 adev->gfx.config.num_rbs = hweight32(active_rbs); 1779 1780 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * 1781 adev->gfx.config.max_shader_engines, 16); 1782 1783 gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1); 1784 1785 if (!adev->gfx.config.backend_enable_mask || 1786 adev->gfx.config.num_rbs >= num_rb_pipes) { 1787 WREG32(mmPA_SC_RASTER_CONFIG, raster_config); 1788 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); 1789 } else { 1790 gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, 1791 adev->gfx.config.backend_enable_mask, 1792 num_rb_pipes); 1793 } 1794 1795 /* cache the values for userspace */ 1796 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1797 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1798 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 1799 adev->gfx.config.rb_config[i][j].rb_backend_disable = 1800 RREG32(mmCC_RB_BACKEND_DISABLE); 1801 adev->gfx.config.rb_config[i][j].user_rb_backend_disable = 1802 RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1803 adev->gfx.config.rb_config[i][j].raster_config = 1804 RREG32(mmPA_SC_RASTER_CONFIG); 1805 adev->gfx.config.rb_config[i][j].raster_config_1 = 1806 RREG32(mmPA_SC_RASTER_CONFIG_1); 1807 } 1808 } 1809 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1810 
mutex_unlock(&adev->grbm_idx_mutex); 1811 } 1812 1813 #define DEFAULT_SH_MEM_BASES (0x6000) 1814 /** 1815 * gfx_v7_0_init_compute_vmid - init the compute VMID SH_MEM registers 1816 * 1817 * @adev: amdgpu_device pointer 1818 * 1819 * Initialize the SH_MEM apertures and GDS access for the compute (KFD) VMIDs. 1820 * 1821 */ 1822 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) 1823 { 1824 int i; 1825 uint32_t sh_mem_config; 1826 uint32_t sh_mem_bases; 1827 1828 /* 1829 * Configure apertures: 1830 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1831 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1832 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1833 */ 1834 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); 1835 sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << 1836 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 1837 sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT; 1838 mutex_lock(&adev->srbm_mutex); 1839 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1840 cik_srbm_select(adev, 0, 0, 0, i); 1841 /* CP and shaders */ 1842 WREG32(mmSH_MEM_CONFIG, sh_mem_config); 1843 WREG32(mmSH_MEM_APE1_BASE, 1); 1844 WREG32(mmSH_MEM_APE1_LIMIT, 0); 1845 WREG32(mmSH_MEM_BASES, sh_mem_bases); 1846 } 1847 cik_srbm_select(adev, 0, 0, 0, 0); 1848 mutex_unlock(&adev->srbm_mutex); 1849 1850 /* Initialize all compute VMIDs to have no GDS, GWS, or OA 1851 access. These should be enabled by FW for target VMIDs. */ 1852 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1853 WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); 1854 WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); 1855 WREG32(amdgpu_gds_reg_offset[i].gws, 0); 1856 WREG32(amdgpu_gds_reg_offset[i].oa, 0); 1857 } 1858 } 1859 1860 static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev) 1861 { 1862 int vmid; 1863 1864 /* 1865 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 1866 * access. Compute VMIDs should be enabled by FW for target VMIDs, 1867 * the driver can enable them for graphics. VMID0 should maintain 1868 * access so that HWS firmware can save/restore entries. 1869 */ 1870 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { 1871 WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); 1872 WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); 1873 WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); 1874 WREG32(amdgpu_gds_reg_offset[vmid].oa, 0); 1875 } 1876 } 1877 1878 static void gfx_v7_0_config_init(struct amdgpu_device *adev) 1879 { 1880 adev->gfx.config.double_offchip_lds_buf = 1; 1881 } 1882 1883 /** 1884 * gfx_v7_0_constants_init - setup the 3D engine 1885 * 1886 * @adev: amdgpu_device pointer 1887 * 1888 * init the gfx constants such as the 3D engine, tiling configuration 1889 * registers, maximum number of quad pipes, render backends...
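 * For each VMID below, SH_MEM_CONFIG is programmed identically; only
 * SH_MEM_BASES differs, with VMID 0 keeping a zero base while the other
 * VMIDs are pointed at the shared aperture.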
1890 */ 1891 static void gfx_v7_0_constants_init(struct amdgpu_device *adev) 1892 { 1893 u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base; 1894 u32 tmp; 1895 int i; 1896 1897 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT)); 1898 1899 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 1900 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 1901 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); 1902 1903 gfx_v7_0_tiling_mode_table_init(adev); 1904 1905 gfx_v7_0_setup_rb(adev); 1906 gfx_v7_0_get_cu_info(adev); 1907 gfx_v7_0_config_init(adev); 1908 1909 /* set HW defaults for 3D engine */ 1910 WREG32(mmCP_MEQ_THRESHOLDS, 1911 (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | 1912 (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); 1913 1914 mutex_lock(&adev->grbm_idx_mutex); 1915 /* 1916 * making sure that the following register writes will be broadcasted 1917 * to all the shaders 1918 */ 1919 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1920 1921 /* XXX SH_MEM regs */ 1922 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1923 sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1924 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1925 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE, 1926 MTYPE_NC); 1927 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE, 1928 MTYPE_UC); 1929 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0); 1930 1931 sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG, 1932 SWIZZLE_ENABLE, 1); 1933 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, 1934 ELEMENT_SIZE, 1); 1935 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, 1936 INDEX_STRIDE, 3); 1937 WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); 1938 1939 mutex_lock(&adev->srbm_mutex); 1940 for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { 1941 if (i == 0) 1942 sh_mem_base = 0; 1943 else 1944 sh_mem_base = adev->gmc.shared_aperture_start >> 48; 1945 cik_srbm_select(adev, 0, 0, 0, i); 1946 /* CP and shaders */ 1947 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg); 1948 WREG32(mmSH_MEM_APE1_BASE, 1); 1949 WREG32(mmSH_MEM_APE1_LIMIT, 0); 1950 WREG32(mmSH_MEM_BASES, sh_mem_base); 1951 } 1952 cik_srbm_select(adev, 0, 0, 0, 0); 1953 mutex_unlock(&adev->srbm_mutex); 1954 1955 gfx_v7_0_init_compute_vmid(adev); 1956 gfx_v7_0_init_gds_vmid(adev); 1957 1958 WREG32(mmSX_DEBUG_1, 0x20); 1959 1960 WREG32(mmTA_CNTL_AUX, 0x00010000); 1961 1962 tmp = RREG32(mmSPI_CONFIG_CNTL); 1963 tmp |= 0x03000000; 1964 WREG32(mmSPI_CONFIG_CNTL, tmp); 1965 1966 WREG32(mmSQ_CONFIG, 1); 1967 1968 WREG32(mmDB_DEBUG, 0); 1969 1970 tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff; 1971 tmp |= 0x00000400; 1972 WREG32(mmDB_DEBUG2, tmp); 1973 1974 tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c; 1975 tmp |= 0x00020200; 1976 WREG32(mmDB_DEBUG3, tmp); 1977 1978 tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000; 1979 tmp |= 0x00018208; 1980 WREG32(mmCB_HW_CONTROL, tmp); 1981 1982 WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT)); 1983 1984 WREG32(mmPA_SC_FIFO_SIZE, 1985 ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | 1986 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | 1987 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | 1988 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT))); 1989 1990 
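	/*
	 * Rough illustration only (the actual numbers are per-ASIC and come
	 * from adev->gfx.config): frontend/backend/HiZ/early-Z FIFO sizes
	 * such as 0x20/0x100/0x30/0x130 are simply shifted into their
	 * PA_SC_FIFO_SIZE fields and OR'd together above.
	 */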
WREG32(mmVGT_NUM_INSTANCES, 1); 1991 1992 WREG32(mmCP_PERFMON_CNTL, 0); 1993 1994 WREG32(mmSQ_CONFIG, 0); 1995 1996 WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, 1997 ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) | 1998 (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT))); 1999 2000 WREG32(mmVGT_CACHE_INVALIDATION, 2001 (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) | 2002 (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT)); 2003 2004 WREG32(mmVGT_GS_VERTEX_REUSE, 16); 2005 WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0); 2006 2007 WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK | 2008 (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT)); 2009 WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK); 2010 2011 tmp = RREG32(mmSPI_ARB_PRIORITY); 2012 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); 2013 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); 2014 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); 2015 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); 2016 WREG32(mmSPI_ARB_PRIORITY, tmp); 2017 2018 mutex_unlock(&adev->grbm_idx_mutex); 2019 2020 udelay(50); 2021 } 2022 2023 /** 2024 * gfx_v7_0_ring_test_ring - basic gfx ring test 2025 * 2026 * @ring: amdgpu_ring structure holding ring information 2027 * 2028 * Allocate a scratch register and write to it using the gfx ring (CIK). 2029 * Provides a basic gfx ring test to verify that the ring is working. 2030 * Used by gfx_v7_0_cp_gfx_resume(); 2031 * Returns 0 on success, error on failure. 2032 */ 2033 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) 2034 { 2035 struct amdgpu_device *adev = ring->adev; 2036 uint32_t tmp = 0; 2037 unsigned i; 2038 int r; 2039 2040 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); 2041 r = amdgpu_ring_alloc(ring, 3); 2042 if (r) 2043 return r; 2044 2045 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 2046 amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START); 2047 amdgpu_ring_write(ring, 0xDEADBEEF); 2048 amdgpu_ring_commit(ring); 2049 2050 for (i = 0; i < adev->usec_timeout; i++) { 2051 tmp = RREG32(mmSCRATCH_REG0); 2052 if (tmp == 0xDEADBEEF) 2053 break; 2054 udelay(1); 2055 } 2056 if (i >= adev->usec_timeout) 2057 r = -ETIMEDOUT; 2058 return r; 2059 } 2060 2061 /** 2062 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp 2063 * 2064 * @ring: amdgpu_ring structure holding ring information 2065 * 2066 * Emits an hdp flush on the cp. 
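 * This uses a WAIT_REG_MEM packet in write/wait/write mode: the CP writes
 * the ring's bit into GPU_HDP_FLUSH_REQ and then polls GPU_HDP_FLUSH_DONE
 * until the same bit comes back.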
2067 */ 2068 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 2069 { 2070 u32 ref_and_mask; 2071 int usepfp; 2072 struct amdgpu_device *adev = ring->adev; 2073 2074 if (!adev->gfx.funcs->get_hdp_flush_mask) { 2075 dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__); 2076 return; 2077 } 2078 2079 adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &usepfp); 2080 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 2081 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */ 2082 WAIT_REG_MEM_FUNCTION(3) | /* == */ 2083 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ 2084 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ); 2085 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE); 2086 amdgpu_ring_write(ring, ref_and_mask); 2087 amdgpu_ring_write(ring, ref_and_mask); 2088 amdgpu_ring_write(ring, 0x20); /* poll interval */ 2089 } 2090 2091 static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) 2092 { 2093 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2094 amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) | 2095 EVENT_INDEX(4)); 2096 2097 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2098 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) | 2099 EVENT_INDEX(0)); 2100 } 2101 2102 /** 2103 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring 2104 * 2105 * @ring: amdgpu_ring structure holding ring information 2106 * @addr: address 2107 * @seq: sequence number 2108 * @flags: fence related flags 2109 * 2110 * Emits a fence sequence number on the gfx ring and flushes 2111 * GPU caches. 2112 */ 2113 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 2114 u64 seq, unsigned flags) 2115 { 2116 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2117 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2118 bool exec = flags & AMDGPU_FENCE_FLAG_EXEC; 2119 2120 /* Workaround for cache flush problems. First send a dummy EOP 2121 * event down the pipe with seq one below. 2122 */ 2123 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2124 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2125 EOP_TC_ACTION_EN | 2126 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2127 EVENT_INDEX(5))); 2128 amdgpu_ring_write(ring, addr & 0xfffffffc); 2129 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | 2130 DATA_SEL(1) | INT_SEL(0)); 2131 amdgpu_ring_write(ring, lower_32_bits(seq - 1)); 2132 amdgpu_ring_write(ring, upper_32_bits(seq - 1)); 2133 2134 /* Then send the real EOP event down the pipe. */ 2135 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2136 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2137 EOP_TC_ACTION_EN | 2138 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2139 EVENT_INDEX(5) | 2140 (exec ? EOP_EXEC : 0))); 2141 amdgpu_ring_write(ring, addr & 0xfffffffc); 2142 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | 2143 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 2144 amdgpu_ring_write(ring, lower_32_bits(seq)); 2145 amdgpu_ring_write(ring, upper_32_bits(seq)); 2146 } 2147 2148 /** 2149 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring 2150 * 2151 * @ring: amdgpu_ring structure holding ring information 2152 * @addr: address 2153 * @seq: sequence number 2154 * @flags: fence related flags 2155 * 2156 * Emits a fence sequence number on the compute ring and flushes 2157 * GPU caches. 
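 * Unlike the gfx path, a single RELEASE_MEM packet is used here; DATA_SEL
 * selects a 32 vs 64 bit fence write and INT_SEL whether an interrupt is
 * raised once the fence value has been written.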
2158 */ 2159 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, 2160 u64 addr, u64 seq, 2161 unsigned flags) 2162 { 2163 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2164 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2165 2166 /* RELEASE_MEM - flush caches, send int */ 2167 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5)); 2168 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2169 EOP_TC_ACTION_EN | 2170 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2171 EVENT_INDEX(5))); 2172 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 2173 amdgpu_ring_write(ring, addr & 0xfffffffc); 2174 amdgpu_ring_write(ring, upper_32_bits(addr)); 2175 amdgpu_ring_write(ring, lower_32_bits(seq)); 2176 amdgpu_ring_write(ring, upper_32_bits(seq)); 2177 } 2178 2179 /* 2180 * IB stuff 2181 */ 2182 /** 2183 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring 2184 * 2185 * @ring: amdgpu_ring structure holding ring information 2186 * @job: job to retrieve vmid from 2187 * @ib: amdgpu indirect buffer object 2188 * @flags: options (AMDGPU_HAVE_CTX_SWITCH) 2189 * 2190 * Emits an DE (drawing engine) or CE (constant engine) IB 2191 * on the gfx ring. IBs are usually generated by userspace 2192 * acceleration drivers and submitted to the kernel for 2193 * scheduling on the ring. This function schedules the IB 2194 * on the gfx ring for execution by the GPU. 2195 */ 2196 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 2197 struct amdgpu_job *job, 2198 struct amdgpu_ib *ib, 2199 uint32_t flags) 2200 { 2201 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2202 u32 header, control = 0; 2203 2204 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2205 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 2206 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2207 amdgpu_ring_write(ring, 0); 2208 } 2209 2210 if (ib->flags & AMDGPU_IB_FLAG_CE) 2211 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 2212 else 2213 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 2214 2215 control |= ib->length_dw | (vmid << 24); 2216 2217 amdgpu_ring_write(ring, header); 2218 amdgpu_ring_write(ring, 2219 #ifdef __BIG_ENDIAN 2220 (2 << 0) | 2221 #endif 2222 (ib->gpu_addr & 0xFFFFFFFC)); 2223 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 2224 amdgpu_ring_write(ring, control); 2225 } 2226 2227 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 2228 struct amdgpu_job *job, 2229 struct amdgpu_ib *ib, 2230 uint32_t flags) 2231 { 2232 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2233 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 2234 2235 /* Currently, there is a high possibility to get wave ID mismatch 2236 * between ME and GDS, leading to a hw deadlock, because ME generates 2237 * different wave IDs than the GDS expects. This situation happens 2238 * randomly when at least 5 compute pipes use GDS ordered append. 2239 * The wave IDs generated by ME are also wrong after suspend/resume. 2240 * Those are probably bugs somewhere else in the kernel driver. 2241 * 2242 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 2243 * GDS to 0 for this ring (me/pipe). 
2244 */ 2245 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 2246 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2247 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START); 2248 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 2249 } 2250 2251 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2252 amdgpu_ring_write(ring, 2253 #ifdef __BIG_ENDIAN 2254 (2 << 0) | 2255 #endif 2256 (ib->gpu_addr & 0xFFFFFFFC)); 2257 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 2258 amdgpu_ring_write(ring, control); 2259 } 2260 2261 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 2262 { 2263 uint32_t dw2 = 0; 2264 2265 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 2266 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 2267 gfx_v7_0_ring_emit_vgt_flush(ring); 2268 /* set load_global_config & load_global_uconfig */ 2269 dw2 |= 0x8001; 2270 /* set load_cs_sh_regs */ 2271 dw2 |= 0x01000000; 2272 /* set load_per_context_state & load_gfx_sh_regs */ 2273 dw2 |= 0x10002; 2274 } 2275 2276 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2277 amdgpu_ring_write(ring, dw2); 2278 amdgpu_ring_write(ring, 0); 2279 } 2280 2281 /** 2282 * gfx_v7_0_ring_test_ib - basic ring IB test 2283 * 2284 * @ring: amdgpu_ring structure holding ring information 2285 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT 2286 * 2287 * Allocate an IB and execute it on the gfx ring (CIK). 2288 * Provides a basic gfx ring test to verify that IBs are working. 2289 * Returns 0 on success, error on failure. 2290 */ 2291 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 2292 { 2293 struct amdgpu_device *adev = ring->adev; 2294 struct amdgpu_ib ib; 2295 struct dma_fence *f = NULL; 2296 uint32_t tmp = 0; 2297 long r; 2298 2299 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); 2300 memset(&ib, 0, sizeof(ib)); 2301 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 2302 if (r) 2303 return r; 2304 2305 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 2306 ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START; 2307 ib.ptr[2] = 0xDEADBEEF; 2308 ib.length_dw = 3; 2309 2310 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 2311 if (r) 2312 goto error; 2313 2314 r = dma_fence_wait_timeout(f, false, timeout); 2315 if (r == 0) { 2316 r = -ETIMEDOUT; 2317 goto error; 2318 } else if (r < 0) { 2319 goto error; 2320 } 2321 tmp = RREG32(mmSCRATCH_REG0); 2322 if (tmp == 0xDEADBEEF) 2323 r = 0; 2324 else 2325 r = -EINVAL; 2326 2327 error: 2328 amdgpu_ib_free(&ib, NULL); 2329 dma_fence_put(f); 2330 return r; 2331 } 2332 2333 /* 2334 * CP. 2335 * On CIK, gfx and compute now have independent command processors. 2336 * 2337 * GFX 2338 * Gfx consists of a single ring and can process both gfx jobs and 2339 * compute jobs. The gfx CP consists of three microengines (ME): 2340 * PFP - Pre-Fetch Parser 2341 * ME - Micro Engine 2342 * CE - Constant Engine 2343 * The PFP and ME make up what is considered the Drawing Engine (DE). 2344 * The CE is an asynchronous engine used for updating buffer desciptors 2345 * used by the DE so that they can be loaded into cache in parallel 2346 * while the DE is processing state update packets. 2347 * 2348 * Compute 2349 * The compute CP consists of two microengines (ME): 2350 * MEC1 - Compute MicroEngine 1 2351 * MEC2 - Compute MicroEngine 2 2352 * Each MEC supports 4 compute pipes and each pipe supports 8 queues. 
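 * (With both MECs enabled that is up to 2 * 4 * 8 = 64 hardware compute
 * queues.)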
2353 * The queues are exposed to userspace and are programmed directly 2354 * by the compute runtime. 2355 */ 2356 /** 2357 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs 2358 * 2359 * @adev: amdgpu_device pointer 2360 * @enable: enable or disable the MEs 2361 * 2362 * Halts or unhalts the gfx MEs. 2363 */ 2364 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2365 { 2366 if (enable) 2367 WREG32(mmCP_ME_CNTL, 0); 2368 else 2369 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | 2370 CP_ME_CNTL__PFP_HALT_MASK | 2371 CP_ME_CNTL__CE_HALT_MASK)); 2372 udelay(50); 2373 } 2374 2375 /** 2376 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode 2377 * 2378 * @adev: amdgpu_device pointer 2379 * 2380 * Loads the gfx PFP, ME, and CE ucode. 2381 * Returns 0 for success, -EINVAL if the ucode is not available. 2382 */ 2383 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2384 { 2385 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2386 const struct gfx_firmware_header_v1_0 *ce_hdr; 2387 const struct gfx_firmware_header_v1_0 *me_hdr; 2388 const __le32 *fw_data; 2389 unsigned i, fw_size; 2390 2391 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) 2392 return -EINVAL; 2393 2394 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; 2395 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; 2396 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; 2397 2398 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2399 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2400 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2401 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); 2402 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); 2403 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); 2404 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); 2405 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); 2406 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); 2407 2408 gfx_v7_0_cp_gfx_enable(adev, false); 2409 2410 /* PFP */ 2411 fw_data = (const __le32 *) 2412 (adev->gfx.pfp_fw->data + 2413 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2414 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; 2415 WREG32(mmCP_PFP_UCODE_ADDR, 0); 2416 for (i = 0; i < fw_size; i++) 2417 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 2418 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2419 2420 /* CE */ 2421 fw_data = (const __le32 *) 2422 (adev->gfx.ce_fw->data + 2423 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 2424 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; 2425 WREG32(mmCP_CE_UCODE_ADDR, 0); 2426 for (i = 0; i < fw_size; i++) 2427 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 2428 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); 2429 2430 /* ME */ 2431 fw_data = (const __le32 *) 2432 (adev->gfx.me_fw->data + 2433 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2434 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; 2435 WREG32(mmCP_ME_RAM_WADDR, 0); 2436 for (i = 0; i < fw_size; i++) 2437 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 2438 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); 2439 2440 return 0; 2441 } 2442 2443 /** 2444 * gfx_v7_0_cp_gfx_start - start the gfx ring 2445 * 2446 * @adev: amdgpu_device pointer 2447 * 2448 * Enables the ring and loads the clear state context and other 2449 * 
packets required to init the ring. 2450 * Returns 0 for success, error for failure. 2451 */ 2452 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) 2453 { 2454 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; 2455 const struct cs_section_def *sect = NULL; 2456 const struct cs_extent_def *ext = NULL; 2457 int r, i; 2458 2459 /* init the CP */ 2460 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); 2461 WREG32(mmCP_ENDIAN_SWAP, 0); 2462 WREG32(mmCP_DEVICE_ID, 1); 2463 2464 gfx_v7_0_cp_gfx_enable(adev, true); 2465 2466 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); 2467 if (r) { 2468 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2469 return r; 2470 } 2471 2472 /* init the CE partitions. CE only used for gfx on CIK */ 2473 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 2474 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 2475 amdgpu_ring_write(ring, 0x8000); 2476 amdgpu_ring_write(ring, 0x8000); 2477 2478 /* clear state buffer */ 2479 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2480 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 2481 2482 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2483 amdgpu_ring_write(ring, 0x80000000); 2484 amdgpu_ring_write(ring, 0x80000000); 2485 2486 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 2487 for (ext = sect->section; ext->extent != NULL; ++ext) { 2488 if (sect->id == SECT_CONTEXT) { 2489 amdgpu_ring_write(ring, 2490 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); 2491 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START); 2492 for (i = 0; i < ext->reg_count; i++) 2493 amdgpu_ring_write(ring, ext->extent[i]); 2494 } 2495 } 2496 } 2497 2498 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 2499 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 2500 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config); 2501 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1); 2502 2503 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2504 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 2505 2506 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 2507 amdgpu_ring_write(ring, 0); 2508 2509 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 2510 amdgpu_ring_write(ring, 0x00000316); 2511 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2512 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 2513 2514 amdgpu_ring_commit(ring); 2515 2516 return 0; 2517 } 2518 2519 /** 2520 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers 2521 * 2522 * @adev: amdgpu_device pointer 2523 * 2524 * Program the location and size of the gfx ring buffer 2525 * and test it to make sure it's working. 2526 * Returns 0 for success, error for failure. 
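 * The ring size is programmed as a log2 value, e.g. a 1 MiB ring buffer
 * gives order_base_2(1048576 / 8) = 17 in the size field of CP_RB0_CNTL.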
2527 */ 2528 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) 2529 { 2530 struct amdgpu_ring *ring; 2531 u32 tmp; 2532 u32 rb_bufsz; 2533 u64 rb_addr, rptr_addr; 2534 int r; 2535 2536 WREG32(mmCP_SEM_WAIT_TIMER, 0x0); 2537 if (adev->asic_type != CHIP_HAWAII) 2538 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 2539 2540 /* Set the write pointer delay */ 2541 WREG32(mmCP_RB_WPTR_DELAY, 0); 2542 2543 /* set the RB to use vmid 0 */ 2544 WREG32(mmCP_RB_VMID, 0); 2545 2546 WREG32(mmSCRATCH_ADDR, 0); 2547 2548 /* ring 0 - compute and gfx */ 2549 /* Set ring buffer size */ 2550 ring = &adev->gfx.gfx_ring[0]; 2551 rb_bufsz = order_base_2(ring->ring_size / 8); 2552 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2553 #ifdef __BIG_ENDIAN 2554 tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT; 2555 #endif 2556 WREG32(mmCP_RB0_CNTL, tmp); 2557 2558 /* Initialize the ring buffer's read and write pointers */ 2559 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK); 2560 ring->wptr = 0; 2561 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2562 2563 /* set the wb address whether it's enabled or not */ 2564 rptr_addr = ring->rptr_gpu_addr; 2565 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2566 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); 2567 2568 /* scratch register shadowing is no longer supported */ 2569 WREG32(mmSCRATCH_UMSK, 0); 2570 2571 mdelay(1); 2572 WREG32(mmCP_RB0_CNTL, tmp); 2573 2574 rb_addr = ring->gpu_addr >> 8; 2575 WREG32(mmCP_RB0_BASE, rb_addr); 2576 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2577 2578 /* start the ring */ 2579 gfx_v7_0_cp_gfx_start(adev); 2580 r = amdgpu_ring_test_helper(ring); 2581 if (r) 2582 return r; 2583 2584 return 0; 2585 } 2586 2587 static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring) 2588 { 2589 return *ring->rptr_cpu_addr; 2590 } 2591 2592 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 2593 { 2594 struct amdgpu_device *adev = ring->adev; 2595 2596 return RREG32(mmCP_RB0_WPTR); 2597 } 2598 2599 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 2600 { 2601 struct amdgpu_device *adev = ring->adev; 2602 2603 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2604 (void)RREG32(mmCP_RB0_WPTR); 2605 } 2606 2607 static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 2608 { 2609 /* XXX check if swapping is necessary on BE */ 2610 return *ring->wptr_cpu_addr; 2611 } 2612 2613 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 2614 { 2615 struct amdgpu_device *adev = ring->adev; 2616 2617 /* XXX check if swapping is necessary on BE */ 2618 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); 2619 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 2620 } 2621 2622 /** 2623 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs 2624 * 2625 * @adev: amdgpu_device pointer 2626 * @enable: enable or disable the MEs 2627 * 2628 * Halts or unhalts the compute MEs. 2629 */ 2630 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2631 { 2632 if (enable) 2633 WREG32(mmCP_MEC_CNTL, 0); 2634 else 2635 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | 2636 CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2637 udelay(50); 2638 } 2639 2640 /** 2641 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode 2642 * 2643 * @adev: amdgpu_device pointer 2644 * 2645 * Loads the compute MEC1&2 ucode. 2646 * Returns 0 for success, -EINVAL if the ucode is not available. 
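 * MEC2 ucode is only loaded on Kaveri (see below); the other CIK parts
 * run their compute pipes from MEC1 alone.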
2647 */ 2648 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) 2649 { 2650 const struct gfx_firmware_header_v1_0 *mec_hdr; 2651 const __le32 *fw_data; 2652 unsigned i, fw_size; 2653 2654 if (!adev->gfx.mec_fw) 2655 return -EINVAL; 2656 2657 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2658 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2659 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 2660 adev->gfx.mec_feature_version = le32_to_cpu( 2661 mec_hdr->ucode_feature_version); 2662 2663 gfx_v7_0_cp_compute_enable(adev, false); 2664 2665 /* MEC1 */ 2666 fw_data = (const __le32 *) 2667 (adev->gfx.mec_fw->data + 2668 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 2669 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; 2670 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); 2671 for (i = 0; i < fw_size; i++) 2672 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); 2673 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); 2674 2675 if (adev->asic_type == CHIP_KAVERI) { 2676 const struct gfx_firmware_header_v1_0 *mec2_hdr; 2677 2678 if (!adev->gfx.mec2_fw) 2679 return -EINVAL; 2680 2681 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2682 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2683 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 2684 adev->gfx.mec2_feature_version = le32_to_cpu( 2685 mec2_hdr->ucode_feature_version); 2686 2687 /* MEC2 */ 2688 fw_data = (const __le32 *) 2689 (adev->gfx.mec2_fw->data + 2690 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); 2691 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; 2692 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); 2693 for (i = 0; i < fw_size; i++) 2694 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); 2695 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); 2696 } 2697 2698 return 0; 2699 } 2700 2701 /** 2702 * gfx_v7_0_cp_compute_fini - stop the compute queues 2703 * 2704 * @adev: amdgpu_device pointer 2705 * 2706 * Stop the compute queues and tear down the driver queue 2707 * info. 2708 */ 2709 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) 2710 { 2711 int i; 2712 2713 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2714 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 2715 2716 amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); 2717 } 2718 } 2719 2720 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) 2721 { 2722 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 2723 } 2724 2725 static int gfx_v7_0_mec_init(struct amdgpu_device *adev) 2726 { 2727 int r; 2728 u32 *hpd; 2729 size_t mec_hpd_size; 2730 2731 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 2732 2733 /* take ownership of the relevant compute queues */ 2734 amdgpu_gfx_compute_queue_acquire(adev); 2735 2736 /* allocate space for ALL pipes (even the ones we don't own) */ 2737 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec 2738 * GFX7_MEC_HPD_SIZE * 2; 2739 2740 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 2741 AMDGPU_GEM_DOMAIN_VRAM | 2742 AMDGPU_GEM_DOMAIN_GTT, 2743 &adev->gfx.mec.hpd_eop_obj, 2744 &adev->gfx.mec.hpd_eop_gpu_addr, 2745 (void **)&hpd); 2746 if (r) { 2747 dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r); 2748 gfx_v7_0_mec_fini(adev); 2749 return r; 2750 } 2751 2752 /* clear memory. 
Not sure if this is required or not */ 2753 memset(hpd, 0, mec_hpd_size); 2754 2755 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 2756 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 2757 2758 return 0; 2759 } 2760 2761 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev, 2762 int mec, int pipe) 2763 { 2764 u64 eop_gpu_addr; 2765 u32 tmp; 2766 size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe) 2767 * GFX7_MEC_HPD_SIZE * 2; 2768 2769 mutex_lock(&adev->srbm_mutex); 2770 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset; 2771 2772 cik_srbm_select(adev, mec + 1, pipe, 0, 0); 2773 2774 /* write the EOP addr */ 2775 WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); 2776 WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); 2777 2778 /* set the VMID assigned */ 2779 WREG32(mmCP_HPD_EOP_VMID, 0); 2780 2781 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2782 tmp = RREG32(mmCP_HPD_EOP_CONTROL); 2783 tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK; 2784 tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8); 2785 WREG32(mmCP_HPD_EOP_CONTROL, tmp); 2786 2787 cik_srbm_select(adev, 0, 0, 0, 0); 2788 mutex_unlock(&adev->srbm_mutex); 2789 } 2790 2791 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev) 2792 { 2793 int i; 2794 2795 /* disable the queue if it's active */ 2796 if (RREG32(mmCP_HQD_ACTIVE) & 1) { 2797 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); 2798 for (i = 0; i < adev->usec_timeout; i++) { 2799 if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) 2800 break; 2801 udelay(1); 2802 } 2803 2804 if (i == adev->usec_timeout) 2805 return -ETIMEDOUT; 2806 2807 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); 2808 WREG32(mmCP_HQD_PQ_RPTR, 0); 2809 WREG32(mmCP_HQD_PQ_WPTR, 0); 2810 } 2811 2812 return 0; 2813 } 2814 2815 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, 2816 struct cik_mqd *mqd, 2817 uint64_t mqd_gpu_addr, 2818 struct amdgpu_ring *ring) 2819 { 2820 u64 hqd_gpu_addr; 2821 u64 wb_gpu_addr; 2822 2823 /* init the mqd struct */ 2824 memset(mqd, 0, sizeof(struct cik_mqd)); 2825 2826 mqd->header = 0xC0310800; 2827 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 2828 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 2829 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 2830 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2831 2832 /* enable doorbell? 
*/ 2833 mqd->cp_hqd_pq_doorbell_control = 2834 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 2835 if (ring->use_doorbell) 2836 mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2837 else 2838 mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2839 2840 /* set the pointer to the MQD */ 2841 mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc; 2842 mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); 2843 2844 /* set MQD vmid to 0 */ 2845 mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL); 2846 mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK; 2847 2848 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ 2849 hqd_gpu_addr = ring->gpu_addr >> 8; 2850 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 2851 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 2852 2853 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2854 mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL); 2855 mqd->cp_hqd_pq_control &= 2856 ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK | 2857 CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK); 2858 2859 mqd->cp_hqd_pq_control |= 2860 order_base_2(ring->ring_size / 8); 2861 mqd->cp_hqd_pq_control |= 2862 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8); 2863 #ifdef __BIG_ENDIAN 2864 mqd->cp_hqd_pq_control |= 2865 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT; 2866 #endif 2867 mqd->cp_hqd_pq_control &= 2868 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK | 2869 CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK | 2870 CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK); 2871 mqd->cp_hqd_pq_control |= 2872 CP_HQD_PQ_CONTROL__PRIV_STATE_MASK | 2873 CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */ 2874 2875 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2876 wb_gpu_addr = ring->wptr_gpu_addr; 2877 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2878 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2879 2880 /* set the wb address whether it's enabled or not */ 2881 wb_gpu_addr = ring->rptr_gpu_addr; 2882 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 2883 mqd->cp_hqd_pq_rptr_report_addr_hi = 2884 upper_32_bits(wb_gpu_addr) & 0xffff; 2885 2886 /* enable the doorbell if requested */ 2887 if (ring->use_doorbell) { 2888 mqd->cp_hqd_pq_doorbell_control = 2889 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 2890 mqd->cp_hqd_pq_doorbell_control &= 2891 ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK; 2892 mqd->cp_hqd_pq_doorbell_control |= 2893 (ring->doorbell_index << 2894 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT); 2895 mqd->cp_hqd_pq_doorbell_control |= 2896 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2897 mqd->cp_hqd_pq_doorbell_control &= 2898 ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK | 2899 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK); 2900 2901 } else { 2902 mqd->cp_hqd_pq_doorbell_control = 0; 2903 } 2904 2905 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2906 ring->wptr = 0; 2907 mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr); 2908 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); 2909 2910 /* set the vmid for the queue */ 2911 mqd->cp_hqd_vmid = 0; 2912 2913 /* defaults */ 2914 mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL); 2915 mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR); 2916 mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI); 2917 mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR); 2918 mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE); 2919 mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD); 2920 
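	/* the remaining HQD state is captured as-is so that
	 * gfx_v7_0_mqd_commit() can later write the whole HQD register
	 * range back from this MQD image
	 */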
mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE); 2921 mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO); 2922 mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI); 2923 mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO); 2924 mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI); 2925 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); 2926 mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); 2927 mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); 2928 mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); 2929 mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR); 2930 2931 /* activate the queue */ 2932 mqd->cp_hqd_active = 1; 2933 } 2934 2935 static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd) 2936 { 2937 uint32_t tmp; 2938 uint32_t mqd_reg; 2939 uint32_t *mqd_data; 2940 2941 /* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */ 2942 mqd_data = &mqd->cp_mqd_base_addr_lo; 2943 2944 /* disable wptr polling */ 2945 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); 2946 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); 2947 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); 2948 2949 /* program all HQD registers */ 2950 for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++) 2951 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]); 2952 2953 /* activate the HQD */ 2954 for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++) 2955 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]); 2956 2957 return 0; 2958 } 2959 2960 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) 2961 { 2962 int r; 2963 u64 mqd_gpu_addr; 2964 struct cik_mqd *mqd; 2965 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 2966 2967 r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, 2968 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, 2969 &mqd_gpu_addr, (void **)&mqd); 2970 if (r) { 2971 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 2972 return r; 2973 } 2974 2975 mutex_lock(&adev->srbm_mutex); 2976 cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2977 2978 gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring); 2979 gfx_v7_0_mqd_deactivate(adev); 2980 gfx_v7_0_mqd_commit(adev, mqd); 2981 2982 cik_srbm_select(adev, 0, 0, 0, 0); 2983 mutex_unlock(&adev->srbm_mutex); 2984 2985 amdgpu_bo_kunmap(ring->mqd_obj); 2986 amdgpu_bo_unreserve(ring->mqd_obj); 2987 return 0; 2988 } 2989 2990 /** 2991 * gfx_v7_0_cp_compute_resume - setup the compute queue registers 2992 * 2993 * @adev: amdgpu_device pointer 2994 * 2995 * Program the compute queues and test them to make sure they 2996 * are working. 2997 * Returns 0 for success, error for failure. 
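 * Every MEC pipe gets its EOP buffer programmed first, then each driver
 * owned ring has its MQD initialized and committed to the matching HQD.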
2998 */ 2999 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) 3000 { 3001 int r, i, j; 3002 u32 tmp; 3003 struct amdgpu_ring *ring; 3004 3005 /* fix up chicken bits */ 3006 tmp = RREG32(mmCP_CPF_DEBUG); 3007 tmp |= (1 << 23); 3008 WREG32(mmCP_CPF_DEBUG, tmp); 3009 3010 /* init all pipes (even the ones we don't own) */ 3011 for (i = 0; i < adev->gfx.mec.num_mec; i++) 3012 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) 3013 gfx_v7_0_compute_pipe_init(adev, i, j); 3014 3015 /* init the queues */ 3016 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3017 r = gfx_v7_0_compute_queue_init(adev, i); 3018 if (r) { 3019 gfx_v7_0_cp_compute_fini(adev); 3020 return r; 3021 } 3022 } 3023 3024 gfx_v7_0_cp_compute_enable(adev, true); 3025 3026 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3027 ring = &adev->gfx.compute_ring[i]; 3028 amdgpu_ring_test_helper(ring); 3029 } 3030 3031 return 0; 3032 } 3033 3034 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable) 3035 { 3036 gfx_v7_0_cp_gfx_enable(adev, enable); 3037 gfx_v7_0_cp_compute_enable(adev, enable); 3038 } 3039 3040 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev) 3041 { 3042 int r; 3043 3044 r = gfx_v7_0_cp_gfx_load_microcode(adev); 3045 if (r) 3046 return r; 3047 r = gfx_v7_0_cp_compute_load_microcode(adev); 3048 if (r) 3049 return r; 3050 3051 return 0; 3052 } 3053 3054 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 3055 bool enable) 3056 { 3057 u32 tmp = RREG32(mmCP_INT_CNTL_RING0); 3058 3059 if (enable) 3060 tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | 3061 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); 3062 else 3063 tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | 3064 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); 3065 WREG32(mmCP_INT_CNTL_RING0, tmp); 3066 } 3067 3068 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) 3069 { 3070 int r; 3071 3072 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3073 3074 r = gfx_v7_0_cp_load_microcode(adev); 3075 if (r) 3076 return r; 3077 3078 r = gfx_v7_0_cp_gfx_resume(adev); 3079 if (r) 3080 return r; 3081 r = gfx_v7_0_cp_compute_resume(adev); 3082 if (r) 3083 return r; 3084 3085 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3086 3087 return 0; 3088 } 3089 3090 /** 3091 * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP 3092 * 3093 * @ring: the ring to emit the commands to 3094 * 3095 * Sync the command pipeline with the PFP. E.g. wait for everything 3096 * to be completed. 
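 * This is implemented as a WAIT_REG_MEM on the fence address: the PFP (gfx)
 * or ME (compute) polls the fence memory until it matches the last emitted
 * sync sequence number.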
3097 */ 3098 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3099 { 3100 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3101 uint32_t seq = ring->fence_drv.sync_seq; 3102 uint64_t addr = ring->fence_drv.gpu_addr; 3103 3104 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 3105 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 3106 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 3107 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ 3108 amdgpu_ring_write(ring, addr & 0xfffffffc); 3109 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 3110 amdgpu_ring_write(ring, seq); 3111 amdgpu_ring_write(ring, 0xffffffff); 3112 amdgpu_ring_write(ring, 4); /* poll interval */ 3113 3114 if (usepfp) { 3115 /* sync CE with ME to prevent CE fetch CEIB before context switch done */ 3116 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3117 amdgpu_ring_write(ring, 0); 3118 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3119 amdgpu_ring_write(ring, 0); 3120 } 3121 } 3122 3123 /* 3124 * vm 3125 * VMID 0 is the physical GPU addresses as used by the kernel. 3126 * VMIDs 1-15 are used for userspace clients and are handled 3127 * by the amdgpu vm/hsa code. 3128 */ 3129 /** 3130 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP 3131 * 3132 * @ring: amdgpu_ring pointer 3133 * @vmid: vmid number to use 3134 * @pd_addr: address 3135 * 3136 * Update the page table base and flush the VM TLB 3137 * using the CP (CIK). 3138 */ 3139 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 3140 unsigned vmid, uint64_t pd_addr) 3141 { 3142 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3143 3144 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 3145 3146 /* wait for the invalidate to complete */ 3147 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 3148 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ 3149 WAIT_REG_MEM_FUNCTION(0) | /* always */ 3150 WAIT_REG_MEM_ENGINE(0))); /* me */ 3151 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); 3152 amdgpu_ring_write(ring, 0); 3153 amdgpu_ring_write(ring, 0); /* ref */ 3154 amdgpu_ring_write(ring, 0); /* mask */ 3155 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3156 3157 /* compute doesn't have PFP */ 3158 if (usepfp) { 3159 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3160 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3161 amdgpu_ring_write(ring, 0x0); 3162 3163 /* synce CE with ME to prevent CE fetch CEIB before context switch done */ 3164 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3165 amdgpu_ring_write(ring, 0); 3166 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3167 amdgpu_ring_write(ring, 0); 3168 } 3169 } 3170 3171 static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, 3172 uint32_t reg, uint32_t val) 3173 { 3174 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3175 3176 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3177 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 3178 WRITE_DATA_DST_SEL(0))); 3179 amdgpu_ring_write(ring, reg); 3180 amdgpu_ring_write(ring, 0); 3181 amdgpu_ring_write(ring, val); 3182 } 3183 3184 /* 3185 * RLC 3186 * The RLC is a multi-purpose microengine that handles a 3187 * variety of functions. 
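 * On CIK that includes the save/restore and clear-state lists set up below,
 * clock/power gating sequencing and the safe-mode handshake via RLC_GPR_REG2.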
3188 */ 3189 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) 3190 { 3191 const u32 *src_ptr; 3192 u32 dws; 3193 const struct cs_section_def *cs_data; 3194 int r; 3195 3196 /* allocate rlc buffers */ 3197 if (adev->flags & AMD_IS_APU) { 3198 if (adev->asic_type == CHIP_KAVERI) { 3199 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; 3200 adev->gfx.rlc.reg_list_size = 3201 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list); 3202 } else { 3203 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list; 3204 adev->gfx.rlc.reg_list_size = 3205 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list); 3206 } 3207 } 3208 adev->gfx.rlc.cs_data = ci_cs_data; 3209 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ 3210 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ 3211 3212 src_ptr = adev->gfx.rlc.reg_list; 3213 dws = adev->gfx.rlc.reg_list_size; 3214 dws += (5 * 16) + 48 + 48 + 64; 3215 3216 cs_data = adev->gfx.rlc.cs_data; 3217 3218 if (src_ptr) { 3219 /* init save restore block */ 3220 r = amdgpu_gfx_rlc_init_sr(adev, dws); 3221 if (r) 3222 return r; 3223 } 3224 3225 if (cs_data) { 3226 /* init clear state block */ 3227 r = amdgpu_gfx_rlc_init_csb(adev); 3228 if (r) 3229 return r; 3230 } 3231 3232 if (adev->gfx.rlc.cp_table_size) { 3233 r = amdgpu_gfx_rlc_init_cpt(adev); 3234 if (r) 3235 return r; 3236 } 3237 3238 /* init spm vmid with 0xf */ 3239 if (adev->gfx.rlc.funcs->update_spm_vmid) 3240 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0, NULL, 0xf); 3241 3242 return 0; 3243 } 3244 3245 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable) 3246 { 3247 u32 tmp; 3248 3249 tmp = RREG32(mmRLC_LB_CNTL); 3250 if (enable) 3251 tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; 3252 else 3253 tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; 3254 WREG32(mmRLC_LB_CNTL, tmp); 3255 } 3256 3257 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) 3258 { 3259 u32 i, j, k; 3260 u32 mask; 3261 3262 mutex_lock(&adev->grbm_idx_mutex); 3263 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3264 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 3265 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 3266 for (k = 0; k < adev->usec_timeout; k++) { 3267 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) 3268 break; 3269 udelay(1); 3270 } 3271 } 3272 } 3273 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3274 mutex_unlock(&adev->grbm_idx_mutex); 3275 3276 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 3277 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | 3278 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | 3279 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; 3280 for (k = 0; k < adev->usec_timeout; k++) { 3281 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 3282 break; 3283 udelay(1); 3284 } 3285 } 3286 3287 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc) 3288 { 3289 u32 tmp; 3290 3291 tmp = RREG32(mmRLC_CNTL); 3292 if (tmp != rlc) 3293 WREG32(mmRLC_CNTL, rlc); 3294 } 3295 3296 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) 3297 { 3298 u32 data, orig; 3299 3300 orig = data = RREG32(mmRLC_CNTL); 3301 3302 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) { 3303 u32 i; 3304 3305 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK; 3306 WREG32(mmRLC_CNTL, data); 3307 3308 for (i = 0; i < adev->usec_timeout; i++) { 3309 if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0) 3310 break; 3311 udelay(1); 3312 } 3313 3314 
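		/* also wait for the per-SE/SH serdes masters to go idle
		 * before treating the RLC as halted
		 */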
gfx_v7_0_wait_for_rlc_serdes(adev); 3315 } 3316 3317 return orig; 3318 } 3319 3320 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) 3321 { 3322 return true; 3323 } 3324 3325 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 3326 { 3327 u32 tmp, i, mask; 3328 3329 tmp = 0x1 | (1 << 1); 3330 WREG32(mmRLC_GPR_REG2, tmp); 3331 3332 mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK | 3333 RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK; 3334 for (i = 0; i < adev->usec_timeout; i++) { 3335 if ((RREG32(mmRLC_GPM_STAT) & mask) == mask) 3336 break; 3337 udelay(1); 3338 } 3339 3340 for (i = 0; i < adev->usec_timeout; i++) { 3341 if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0) 3342 break; 3343 udelay(1); 3344 } 3345 } 3346 3347 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 3348 { 3349 u32 tmp; 3350 3351 tmp = 0x1 | (0 << 1); 3352 WREG32(mmRLC_GPR_REG2, tmp); 3353 } 3354 3355 /** 3356 * gfx_v7_0_rlc_stop - stop the RLC ME 3357 * 3358 * @adev: amdgpu_device pointer 3359 * 3360 * Halt the RLC ME (MicroEngine) (CIK). 3361 */ 3362 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) 3363 { 3364 WREG32(mmRLC_CNTL, 0); 3365 3366 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3367 3368 gfx_v7_0_wait_for_rlc_serdes(adev); 3369 } 3370 3371 /** 3372 * gfx_v7_0_rlc_start - start the RLC ME 3373 * 3374 * @adev: amdgpu_device pointer 3375 * 3376 * Unhalt the RLC ME (MicroEngine) (CIK). 3377 */ 3378 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev) 3379 { 3380 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); 3381 3382 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3383 3384 udelay(50); 3385 } 3386 3387 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev) 3388 { 3389 u32 tmp = RREG32(mmGRBM_SOFT_RESET); 3390 3391 tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 3392 WREG32(mmGRBM_SOFT_RESET, tmp); 3393 udelay(50); 3394 tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 3395 WREG32(mmGRBM_SOFT_RESET, tmp); 3396 udelay(50); 3397 } 3398 3399 /** 3400 * gfx_v7_0_rlc_resume - setup the RLC hw 3401 * 3402 * @adev: amdgpu_device pointer 3403 * 3404 * Initialize the RLC registers, load the ucode, 3405 * and start the RLC (CIK). 3406 * Returns 0 for success, -EINVAL if the ucode is not available. 
3407 */ 3408 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) 3409 { 3410 const struct rlc_firmware_header_v1_0 *hdr; 3411 const __le32 *fw_data; 3412 unsigned i, fw_size; 3413 u32 tmp; 3414 3415 if (!adev->gfx.rlc_fw) 3416 return -EINVAL; 3417 3418 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 3419 amdgpu_ucode_print_rlc_hdr(&hdr->header); 3420 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 3421 adev->gfx.rlc_feature_version = le32_to_cpu( 3422 hdr->ucode_feature_version); 3423 3424 adev->gfx.rlc.funcs->stop(adev); 3425 3426 /* disable CG */ 3427 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; 3428 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); 3429 3430 adev->gfx.rlc.funcs->reset(adev); 3431 3432 gfx_v7_0_init_pg(adev); 3433 3434 WREG32(mmRLC_LB_CNTR_INIT, 0); 3435 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); 3436 3437 mutex_lock(&adev->grbm_idx_mutex); 3438 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3439 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); 3440 WREG32(mmRLC_LB_PARAMS, 0x00600408); 3441 WREG32(mmRLC_LB_CNTL, 0x80000004); 3442 mutex_unlock(&adev->grbm_idx_mutex); 3443 3444 WREG32(mmRLC_MC_CNTL, 0); 3445 WREG32(mmRLC_UCODE_CNTL, 0); 3446 3447 fw_data = (const __le32 *) 3448 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 3449 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 3450 WREG32(mmRLC_GPM_UCODE_ADDR, 0); 3451 for (i = 0; i < fw_size; i++) 3452 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 3453 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 3454 3455 /* XXX - find out what chips support lbpw */ 3456 gfx_v7_0_enable_lbpw(adev, false); 3457 3458 if (adev->asic_type == CHIP_BONAIRE) 3459 WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); 3460 3461 adev->gfx.rlc.funcs->start(adev); 3462 3463 return 0; 3464 } 3465 3466 static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id, 3467 struct amdgpu_ring *ring, unsigned vmid) 3468 { 3469 u32 data; 3470 3471 amdgpu_gfx_off_ctrl(adev, false); 3472 3473 data = RREG32(mmRLC_SPM_VMID); 3474 3475 data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; 3476 data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; 3477 3478 WREG32(mmRLC_SPM_VMID, data); 3479 3480 amdgpu_gfx_off_ctrl(adev, true); 3481 } 3482 3483 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) 3484 { 3485 u32 data, orig, tmp, tmp2; 3486 3487 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); 3488 3489 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3490 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3491 3492 tmp = gfx_v7_0_halt_rlc(adev); 3493 3494 mutex_lock(&adev->grbm_idx_mutex); 3495 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3496 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3497 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3498 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | 3499 RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK | 3500 RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK; 3501 WREG32(mmRLC_SERDES_WR_CTRL, tmp2); 3502 mutex_unlock(&adev->grbm_idx_mutex); 3503 3504 gfx_v7_0_update_rlc(adev, tmp); 3505 3506 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3507 if (orig != data) 3508 WREG32(mmRLC_CGCG_CGLS_CTRL, data); 3509 3510 } else { 3511 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3512 3513 RREG32(mmCB_CGTT_SCLK_CTRL); 3514 RREG32(mmCB_CGTT_SCLK_CTRL); 3515 RREG32(mmCB_CGTT_SCLK_CTRL); 3516 RREG32(mmCB_CGTT_SCLK_CTRL); 3517 3518 data &= 
~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); 3519 if (orig != data) 3520 WREG32(mmRLC_CGCG_CGLS_CTRL, data); 3521 3522 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3523 } 3524 } 3525 3526 static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) 3527 { 3528 u32 data, orig, tmp = 0; 3529 3530 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3531 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { 3532 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { 3533 orig = data = RREG32(mmCP_MEM_SLP_CNTL); 3534 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3535 if (orig != data) 3536 WREG32(mmCP_MEM_SLP_CNTL, data); 3537 } 3538 } 3539 3540 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); 3541 data |= 0x00000001; 3542 data &= 0xfffffffd; 3543 if (orig != data) 3544 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); 3545 3546 tmp = gfx_v7_0_halt_rlc(adev); 3547 3548 mutex_lock(&adev->grbm_idx_mutex); 3549 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3550 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3551 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3552 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | 3553 RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK; 3554 WREG32(mmRLC_SERDES_WR_CTRL, data); 3555 mutex_unlock(&adev->grbm_idx_mutex); 3556 3557 gfx_v7_0_update_rlc(adev, tmp); 3558 3559 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) { 3560 orig = data = RREG32(mmCGTS_SM_CTRL_REG); 3561 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; 3562 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); 3563 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; 3564 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; 3565 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) && 3566 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS)) 3567 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; 3568 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; 3569 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; 3570 data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT); 3571 if (orig != data) 3572 WREG32(mmCGTS_SM_CTRL_REG, data); 3573 } 3574 } else { 3575 orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); 3576 data |= 0x00000003; 3577 if (orig != data) 3578 WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data); 3579 3580 data = RREG32(mmRLC_MEM_SLP_CNTL); 3581 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { 3582 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; 3583 WREG32(mmRLC_MEM_SLP_CNTL, data); 3584 } 3585 3586 data = RREG32(mmCP_MEM_SLP_CNTL); 3587 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { 3588 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3589 WREG32(mmCP_MEM_SLP_CNTL, data); 3590 } 3591 3592 orig = data = RREG32(mmCGTS_SM_CTRL_REG); 3593 data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; 3594 if (orig != data) 3595 WREG32(mmCGTS_SM_CTRL_REG, data); 3596 3597 tmp = gfx_v7_0_halt_rlc(adev); 3598 3599 mutex_lock(&adev->grbm_idx_mutex); 3600 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3601 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3602 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3603 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; 3604 WREG32(mmRLC_SERDES_WR_CTRL, data); 3605 mutex_unlock(&adev->grbm_idx_mutex); 3606 3607 gfx_v7_0_update_rlc(adev, tmp); 3608 } 3609 } 3610 3611 static void gfx_v7_0_update_cg(struct amdgpu_device *adev, 3612 bool enable) 3613 { 3614 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3615 /* order matters! 
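 * MGCG is enabled before CGCG, and on disable CGCG is dropped before MGCG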
*/ 3616 if (enable) { 3617 gfx_v7_0_enable_mgcg(adev, true); 3618 gfx_v7_0_enable_cgcg(adev, true); 3619 } else { 3620 gfx_v7_0_enable_cgcg(adev, false); 3621 gfx_v7_0_enable_mgcg(adev, false); 3622 } 3623 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3624 } 3625 3626 static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, 3627 bool enable) 3628 { 3629 u32 data, orig; 3630 3631 orig = data = RREG32(mmRLC_PG_CNTL); 3632 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) 3633 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 3634 else 3635 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 3636 if (orig != data) 3637 WREG32(mmRLC_PG_CNTL, data); 3638 } 3639 3640 static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, 3641 bool enable) 3642 { 3643 u32 data, orig; 3644 3645 orig = data = RREG32(mmRLC_PG_CNTL); 3646 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) 3647 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 3648 else 3649 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 3650 if (orig != data) 3651 WREG32(mmRLC_PG_CNTL, data); 3652 } 3653 3654 static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) 3655 { 3656 u32 data, orig; 3657 3658 orig = data = RREG32(mmRLC_PG_CNTL); 3659 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP)) 3660 data &= ~0x8000; 3661 else 3662 data |= 0x8000; 3663 if (orig != data) 3664 WREG32(mmRLC_PG_CNTL, data); 3665 } 3666 3667 static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) 3668 { 3669 u32 data, orig; 3670 3671 orig = data = RREG32(mmRLC_PG_CNTL); 3672 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS)) 3673 data &= ~0x2000; 3674 else 3675 data |= 0x2000; 3676 if (orig != data) 3677 WREG32(mmRLC_PG_CNTL, data); 3678 } 3679 3680 static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev) 3681 { 3682 if (adev->asic_type == CHIP_KAVERI) 3683 return 5; 3684 else 3685 return 4; 3686 } 3687 3688 static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, 3689 bool enable) 3690 { 3691 u32 data, orig; 3692 3693 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { 3694 orig = data = RREG32(mmRLC_PG_CNTL); 3695 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 3696 if (orig != data) 3697 WREG32(mmRLC_PG_CNTL, data); 3698 3699 orig = data = RREG32(mmRLC_AUTO_PG_CTRL); 3700 data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; 3701 if (orig != data) 3702 WREG32(mmRLC_AUTO_PG_CTRL, data); 3703 } else { 3704 orig = data = RREG32(mmRLC_PG_CNTL); 3705 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 3706 if (orig != data) 3707 WREG32(mmRLC_PG_CNTL, data); 3708 3709 orig = data = RREG32(mmRLC_AUTO_PG_CTRL); 3710 data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK; 3711 if (orig != data) 3712 WREG32(mmRLC_AUTO_PG_CTRL, data); 3713 3714 data = RREG32(mmDB_RENDER_CONTROL); 3715 } 3716 } 3717 3718 static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, 3719 u32 bitmap) 3720 { 3721 u32 data; 3722 3723 if (!bitmap) 3724 return; 3725 3726 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 3727 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 3728 3729 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); 3730 } 3731 3732 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) 3733 { 3734 u32 data, mask; 3735 3736 data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); 3737 data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); 3738 3739 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; 3740 data >>= 
CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; 3741 3742 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); 3743 3744 return (~data) & mask; 3745 } 3746 3747 static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) 3748 { 3749 u32 tmp; 3750 3751 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); 3752 3753 tmp = RREG32(mmRLC_MAX_PG_CU); 3754 tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; 3755 tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); 3756 WREG32(mmRLC_MAX_PG_CU, tmp); 3757 } 3758 3759 static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, 3760 bool enable) 3761 { 3762 u32 data, orig; 3763 3764 orig = data = RREG32(mmRLC_PG_CNTL); 3765 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)) 3766 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 3767 else 3768 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 3769 if (orig != data) 3770 WREG32(mmRLC_PG_CNTL, data); 3771 } 3772 3773 static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, 3774 bool enable) 3775 { 3776 u32 data, orig; 3777 3778 orig = data = RREG32(mmRLC_PG_CNTL); 3779 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)) 3780 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 3781 else 3782 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 3783 if (orig != data) 3784 WREG32(mmRLC_PG_CNTL, data); 3785 } 3786 3787 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90 3788 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D 3789 3790 static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev) 3791 { 3792 u32 data, orig; 3793 u32 i; 3794 3795 if (adev->gfx.rlc.cs_data) { 3796 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); 3797 WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); 3798 WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr)); 3799 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size); 3800 } else { 3801 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET); 3802 for (i = 0; i < 3; i++) 3803 WREG32(mmRLC_GPM_SCRATCH_DATA, 0); 3804 } 3805 if (adev->gfx.rlc.reg_list) { 3806 WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET); 3807 for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) 3808 WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]); 3809 } 3810 3811 orig = data = RREG32(mmRLC_PG_CNTL); 3812 data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK; 3813 if (orig != data) 3814 WREG32(mmRLC_PG_CNTL, data); 3815 3816 WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8); 3817 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); 3818 3819 data = RREG32(mmCP_RB_WPTR_POLL_CNTL); 3820 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; 3821 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3822 WREG32(mmCP_RB_WPTR_POLL_CNTL, data); 3823 3824 data = 0x10101010; 3825 WREG32(mmRLC_PG_DELAY, data); 3826 3827 data = RREG32(mmRLC_PG_DELAY_2); 3828 data &= ~0xff; 3829 data |= 0x3; 3830 WREG32(mmRLC_PG_DELAY_2, data); 3831 3832 data = RREG32(mmRLC_AUTO_PG_CTRL); 3833 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; 3834 data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); 3835 WREG32(mmRLC_AUTO_PG_CTRL, data); 3836 3837 } 3838 3839 static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable) 3840 { 3841 gfx_v7_0_enable_gfx_cgpg(adev, enable); 3842 gfx_v7_0_enable_gfx_static_mgpg(adev, enable); 3843 
gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable); 3844 } 3845 3846 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev) 3847 { 3848 u32 count = 0; 3849 const struct cs_section_def *sect = NULL; 3850 const struct cs_extent_def *ext = NULL; 3851 3852 if (adev->gfx.rlc.cs_data == NULL) 3853 return 0; 3854 3855 /* begin clear state */ 3856 count += 2; 3857 /* context control state */ 3858 count += 3; 3859 3860 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 3861 for (ext = sect->section; ext->extent != NULL; ++ext) { 3862 if (sect->id == SECT_CONTEXT) 3863 count += 2 + ext->reg_count; 3864 else 3865 return 0; 3866 } 3867 } 3868 /* pa_sc_raster_config/pa_sc_raster_config1 */ 3869 count += 4; 3870 /* end clear state */ 3871 count += 2; 3872 /* clear state */ 3873 count += 2; 3874 3875 return count; 3876 } 3877 3878 static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 3879 { 3880 u32 count = 0; 3881 3882 if (adev->gfx.rlc.cs_data == NULL) 3883 return; 3884 if (buffer == NULL) 3885 return; 3886 3887 count = amdgpu_gfx_csb_preamble_start(buffer); 3888 count = amdgpu_gfx_csb_data_parser(adev, buffer, count); 3889 3890 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 3891 buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 3892 buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config); 3893 buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1); 3894 3895 amdgpu_gfx_csb_preamble_end(buffer, count); 3896 } 3897 3898 static void gfx_v7_0_init_pg(struct amdgpu_device *adev) 3899 { 3900 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 3901 AMD_PG_SUPPORT_GFX_SMG | 3902 AMD_PG_SUPPORT_GFX_DMG | 3903 AMD_PG_SUPPORT_CP | 3904 AMD_PG_SUPPORT_GDS | 3905 AMD_PG_SUPPORT_RLC_SMU_HS)) { 3906 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); 3907 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); 3908 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { 3909 gfx_v7_0_init_gfx_cgpg(adev); 3910 gfx_v7_0_enable_cp_pg(adev, true); 3911 gfx_v7_0_enable_gds_pg(adev, true); 3912 } 3913 gfx_v7_0_init_ao_cu_mask(adev); 3914 gfx_v7_0_update_gfx_pg(adev, true); 3915 } 3916 } 3917 3918 static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) 3919 { 3920 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 3921 AMD_PG_SUPPORT_GFX_SMG | 3922 AMD_PG_SUPPORT_GFX_DMG | 3923 AMD_PG_SUPPORT_CP | 3924 AMD_PG_SUPPORT_GDS | 3925 AMD_PG_SUPPORT_RLC_SMU_HS)) { 3926 gfx_v7_0_update_gfx_pg(adev, false); 3927 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { 3928 gfx_v7_0_enable_cp_pg(adev, false); 3929 gfx_v7_0_enable_gds_pg(adev, false); 3930 } 3931 } 3932 } 3933 3934 /** 3935 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot 3936 * 3937 * @adev: amdgpu_device pointer 3938 * 3939 * Fetches a GPU clock counter snapshot (CIK). 3940 * Returns the 64 bit clock counter snapshot.
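 * The count is latched by writing RLC_CAPTURE_GPU_CLOCK_COUNT and then read
 * back as an LSB/MSB pair while holding gpu_clock_mutex.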
3941 */ 3942 static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) 3943 { 3944 uint64_t clock; 3945 3946 mutex_lock(&adev->gfx.gpu_clock_mutex); 3947 WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); 3948 clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) | 3949 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 3950 mutex_unlock(&adev->gfx.gpu_clock_mutex); 3951 return clock; 3952 } 3953 3954 static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, 3955 uint32_t vmid, 3956 uint32_t gds_base, uint32_t gds_size, 3957 uint32_t gws_base, uint32_t gws_size, 3958 uint32_t oa_base, uint32_t oa_size) 3959 { 3960 /* GDS Base */ 3961 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3962 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3963 WRITE_DATA_DST_SEL(0))); 3964 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base); 3965 amdgpu_ring_write(ring, 0); 3966 amdgpu_ring_write(ring, gds_base); 3967 3968 /* GDS Size */ 3969 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3970 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3971 WRITE_DATA_DST_SEL(0))); 3972 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size); 3973 amdgpu_ring_write(ring, 0); 3974 amdgpu_ring_write(ring, gds_size); 3975 3976 /* GWS */ 3977 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3978 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3979 WRITE_DATA_DST_SEL(0))); 3980 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws); 3981 amdgpu_ring_write(ring, 0); 3982 amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); 3983 3984 /* OA */ 3985 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3986 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3987 WRITE_DATA_DST_SEL(0))); 3988 amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa); 3989 amdgpu_ring_write(ring, 0); 3990 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); 3991 } 3992 3993 static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) 3994 { 3995 struct amdgpu_device *adev = ring->adev; 3996 uint32_t value = 0; 3997 3998 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); 3999 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); 4000 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); 4001 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); 4002 WREG32(mmSQ_CMD, value); 4003 } 4004 4005 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) 4006 { 4007 WREG32(mmSQ_IND_INDEX, 4008 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 4009 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 4010 (address << SQ_IND_INDEX__INDEX__SHIFT) | 4011 (SQ_IND_INDEX__FORCE_READ_MASK)); 4012 return RREG32(mmSQ_IND_DATA); 4013 } 4014 4015 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, 4016 uint32_t wave, uint32_t thread, 4017 uint32_t regno, uint32_t num, uint32_t *out) 4018 { 4019 WREG32(mmSQ_IND_INDEX, 4020 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | 4021 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | 4022 (regno << SQ_IND_INDEX__INDEX__SHIFT) | 4023 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) | 4024 (SQ_IND_INDEX__FORCE_READ_MASK) | 4025 (SQ_IND_INDEX__AUTO_INCR_MASK)); 4026 while (num--) 4027 *(out++) = RREG32(mmSQ_IND_DATA); 4028 } 4029 4030 static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) 4031 { 4032 /* type 0 wave data */ 4033 dst[(*no_fields)++] = 0; 4034 dst[(*no_fields)++] = wave_read_ind(adev, 
simd, wave, ixSQ_WAVE_STATUS); 4035 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); 4036 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); 4037 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); 4038 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); 4039 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); 4040 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); 4041 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); 4042 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); 4043 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); 4044 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); 4045 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); 4046 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO); 4047 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI); 4048 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO); 4049 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI); 4050 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); 4051 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); 4052 dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE); 4053 } 4054 4055 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, 4056 uint32_t wave, uint32_t start, 4057 uint32_t size, uint32_t *dst) 4058 { 4059 wave_read_regs( 4060 adev, simd, wave, 0, 4061 start + SQIND_WAVE_SGPRS_OFFSET, size, dst); 4062 } 4063 4064 static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev, 4065 u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id) 4066 { 4067 cik_srbm_select(adev, me, pipe, q, vm); 4068 } 4069 4070 /** 4071 * gfx_v7_0_get_hdp_flush_mask - get the reference and mask for HDP flush 4072 * 4073 * @ring: amdgpu_ring structure holding ring information 4074 * @ref_and_mask: pointer to store the reference and mask 4075 * @reg_mem_engine: pointer to store the register memory engine 4076 * 4077 * Calculates the reference and mask for HDP flush based on the ring type and me. 
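 * Compute and KIQ rings use the CP2/CP6 done bits shifted by the ring's pipe;
 * the gfx ring uses the CP0 bit with a different wait engine.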
4078 */ 4079 static void gfx_v7_0_get_hdp_flush_mask(struct amdgpu_ring *ring, 4080 uint32_t *ref_and_mask, uint32_t *reg_mem_engine) 4081 { 4082 if (!ring || !ref_and_mask || !reg_mem_engine) { 4083 DRM_INFO("%s:invalid params\n", __func__); 4084 return; 4085 } 4086 4087 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || 4088 ring->funcs->type == AMDGPU_RING_TYPE_KIQ) { 4089 switch (ring->me) { 4090 case 1: 4091 *ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; 4092 break; 4093 case 2: 4094 *ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe; 4095 break; 4096 default: 4097 return; 4098 } 4099 *reg_mem_engine = 0; 4100 } else { 4101 *ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK; 4102 *reg_mem_engine = 1; 4103 } 4104 } 4105 4106 static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { 4107 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, 4108 .select_se_sh = &gfx_v7_0_select_se_sh, 4109 .read_wave_data = &gfx_v7_0_read_wave_data, 4110 .read_wave_sgprs = &gfx_v7_0_read_wave_sgprs, 4111 .select_me_pipe_q = &gfx_v7_0_select_me_pipe_q, 4112 .get_hdp_flush_mask = &gfx_v7_0_get_hdp_flush_mask, 4113 }; 4114 4115 static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { 4116 .is_rlc_enabled = gfx_v7_0_is_rlc_enabled, 4117 .set_safe_mode = gfx_v7_0_set_safe_mode, 4118 .unset_safe_mode = gfx_v7_0_unset_safe_mode, 4119 .init = gfx_v7_0_rlc_init, 4120 .get_csb_size = gfx_v7_0_get_csb_size, 4121 .get_csb_buffer = gfx_v7_0_get_csb_buffer, 4122 .get_cp_table_num = gfx_v7_0_cp_pg_table_num, 4123 .resume = gfx_v7_0_rlc_resume, 4124 .stop = gfx_v7_0_rlc_stop, 4125 .reset = gfx_v7_0_rlc_reset, 4126 .start = gfx_v7_0_rlc_start, 4127 .update_spm_vmid = gfx_v7_0_update_spm_vmid 4128 }; 4129 4130 static int gfx_v7_0_early_init(struct amdgpu_ip_block *ip_block) 4131 { 4132 struct amdgpu_device *adev = ip_block->adev; 4133 4134 adev->gfx.xcc_mask = 1; 4135 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; 4136 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), 4137 AMDGPU_MAX_COMPUTE_RINGS); 4138 adev->gfx.funcs = &gfx_v7_0_gfx_funcs; 4139 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; 4140 gfx_v7_0_set_ring_funcs(adev); 4141 gfx_v7_0_set_irq_funcs(adev); 4142 gfx_v7_0_set_gds_init(adev); 4143 4144 return 0; 4145 } 4146 4147 static int gfx_v7_0_late_init(struct amdgpu_ip_block *ip_block) 4148 { 4149 struct amdgpu_device *adev = ip_block->adev; 4150 int r; 4151 4152 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); 4153 if (r) 4154 return r; 4155 4156 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); 4157 if (r) 4158 return r; 4159 4160 return 0; 4161 } 4162 4163 static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev) 4164 { 4165 u32 gb_addr_config; 4166 u32 mc_arb_ramcfg; 4167 u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; 4168 u32 tmp; 4169 4170 switch (adev->asic_type) { 4171 case CHIP_BONAIRE: 4172 adev->gfx.config.max_shader_engines = 2; 4173 adev->gfx.config.max_tile_pipes = 4; 4174 adev->gfx.config.max_cu_per_sh = 7; 4175 adev->gfx.config.max_sh_per_se = 1; 4176 adev->gfx.config.max_backends_per_se = 2; 4177 adev->gfx.config.max_texture_channel_caches = 4; 4178 adev->gfx.config.max_gprs = 256; 4179 adev->gfx.config.max_gs_threads = 32; 4180 adev->gfx.config.max_hw_contexts = 8; 4181 4182 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4183 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4184 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4185 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4186 gb_addr_config = 
BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4187 break; 4188 case CHIP_HAWAII: 4189 adev->gfx.config.max_shader_engines = 4; 4190 adev->gfx.config.max_tile_pipes = 16; 4191 adev->gfx.config.max_cu_per_sh = 11; 4192 adev->gfx.config.max_sh_per_se = 1; 4193 adev->gfx.config.max_backends_per_se = 4; 4194 adev->gfx.config.max_texture_channel_caches = 16; 4195 adev->gfx.config.max_gprs = 256; 4196 adev->gfx.config.max_gs_threads = 32; 4197 adev->gfx.config.max_hw_contexts = 8; 4198 4199 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4200 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4201 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4202 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4203 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN; 4204 break; 4205 case CHIP_KAVERI: 4206 adev->gfx.config.max_shader_engines = 1; 4207 adev->gfx.config.max_tile_pipes = 4; 4208 adev->gfx.config.max_cu_per_sh = 8; 4209 adev->gfx.config.max_backends_per_se = 2; 4210 adev->gfx.config.max_sh_per_se = 1; 4211 adev->gfx.config.max_texture_channel_caches = 4; 4212 adev->gfx.config.max_gprs = 256; 4213 adev->gfx.config.max_gs_threads = 16; 4214 adev->gfx.config.max_hw_contexts = 8; 4215 4216 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4217 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4218 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4219 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4220 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4221 break; 4222 case CHIP_KABINI: 4223 case CHIP_MULLINS: 4224 default: 4225 adev->gfx.config.max_shader_engines = 1; 4226 adev->gfx.config.max_tile_pipes = 2; 4227 adev->gfx.config.max_cu_per_sh = 2; 4228 adev->gfx.config.max_sh_per_se = 1; 4229 adev->gfx.config.max_backends_per_se = 1; 4230 adev->gfx.config.max_texture_channel_caches = 2; 4231 adev->gfx.config.max_gprs = 256; 4232 adev->gfx.config.max_gs_threads = 16; 4233 adev->gfx.config.max_hw_contexts = 8; 4234 4235 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 4236 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; 4237 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; 4238 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; 4239 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; 4240 break; 4241 } 4242 4243 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); 4244 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; 4245 4246 adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg, 4247 MC_ARB_RAMCFG, NOOFBANK); 4248 adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg, 4249 MC_ARB_RAMCFG, NOOFRANKS); 4250 4251 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; 4252 adev->gfx.config.mem_max_burst_length_bytes = 256; 4253 if (adev->flags & AMD_IS_APU) { 4254 /* Get memory bank mapping mode. */ 4255 tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); 4256 dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 4257 dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 4258 4259 tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); 4260 dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); 4261 dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); 4262 4263 /* Validate settings in case only one DIMM installed. 
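	 * Address map encodings of 0, 3, 4 or anything above 12 are
	 * treated as invalid and the map is forced to 0.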
*/ 4264 if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) 4265 dimm00_addr_map = 0; 4266 if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) 4267 dimm01_addr_map = 0; 4268 if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) 4269 dimm10_addr_map = 0; 4270 if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) 4271 dimm11_addr_map = 0; 4272 4273 /* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */ 4274 /* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */ 4275 if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) 4276 adev->gfx.config.mem_row_size_in_kb = 2; 4277 else 4278 adev->gfx.config.mem_row_size_in_kb = 1; 4279 } else { 4280 tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT; 4281 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 4282 if (adev->gfx.config.mem_row_size_in_kb > 4) 4283 adev->gfx.config.mem_row_size_in_kb = 4; 4284 } 4285 /* XXX use MC settings? */ 4286 adev->gfx.config.shader_engine_tile_size = 32; 4287 adev->gfx.config.num_gpus = 1; 4288 adev->gfx.config.multi_gpu_tile_size = 64; 4289 4290 /* fix up row size */ 4291 gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK; 4292 switch (adev->gfx.config.mem_row_size_in_kb) { 4293 case 1: 4294 default: 4295 gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4296 break; 4297 case 2: 4298 gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4299 break; 4300 case 4: 4301 gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT); 4302 break; 4303 } 4304 adev->gfx.config.gb_addr_config = gb_addr_config; 4305 } 4306 4307 static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, 4308 int mec, int pipe, int queue) 4309 { 4310 int r; 4311 unsigned irq_type; 4312 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 4313 4314 /* mec0 is me1 */ 4315 ring->me = mec + 1; 4316 ring->pipe = pipe; 4317 ring->queue = queue; 4318 4319 ring->ring_obj = NULL; 4320 ring->use_doorbell = true; 4321 ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id; 4322 sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); 4323 4324 irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP 4325 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) 4326 + ring->pipe; 4327 4328 /* type-2 packets are deprecated on MEC, use type-3 instead */ 4329 r = amdgpu_ring_init(adev, ring, 1024, 4330 &adev->gfx.eop_irq, irq_type, 4331 AMDGPU_RING_PRIO_DEFAULT, NULL); 4332 if (r) 4333 return r; 4334 4335 4336 return 0; 4337 } 4338 4339 static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block) 4340 { 4341 struct amdgpu_ring *ring; 4342 struct amdgpu_device *adev = ip_block->adev; 4343 int i, j, k, r, ring_id; 4344 4345 switch (adev->asic_type) { 4346 case CHIP_KAVERI: 4347 adev->gfx.mec.num_mec = 2; 4348 break; 4349 case CHIP_BONAIRE: 4350 case CHIP_HAWAII: 4351 case CHIP_KABINI: 4352 case CHIP_MULLINS: 4353 default: 4354 adev->gfx.mec.num_mec = 1; 4355 break; 4356 } 4357 adev->gfx.mec.num_pipe_per_mec = 4; 4358 adev->gfx.mec.num_queue_per_pipe = 8; 4359 4360 /* EOP Event */ 4361 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); 4362 if (r) 4363 return r; 4364 4365 /* Privileged reg */ 4366 r = amdgpu_irq_add_id(adev, 
AMDGPU_IRQ_CLIENTID_LEGACY, 184, 4367 &adev->gfx.priv_reg_irq); 4368 if (r) 4369 return r; 4370 4371 /* Privileged inst */ 4372 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, 4373 &adev->gfx.priv_inst_irq); 4374 if (r) 4375 return r; 4376 4377 r = gfx_v7_0_init_microcode(adev); 4378 if (r) { 4379 DRM_ERROR("Failed to load gfx firmware!\n"); 4380 return r; 4381 } 4382 4383 r = adev->gfx.rlc.funcs->init(adev); 4384 if (r) { 4385 DRM_ERROR("Failed to init rlc BOs!\n"); 4386 return r; 4387 } 4388 4389 /* allocate mec buffers */ 4390 r = gfx_v7_0_mec_init(adev); 4391 if (r) { 4392 DRM_ERROR("Failed to init MEC BOs!\n"); 4393 return r; 4394 } 4395 4396 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4397 ring = &adev->gfx.gfx_ring[i]; 4398 ring->ring_obj = NULL; 4399 sprintf(ring->name, "gfx"); 4400 r = amdgpu_ring_init(adev, ring, 1024, 4401 &adev->gfx.eop_irq, 4402 AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, 4403 AMDGPU_RING_PRIO_DEFAULT, NULL); 4404 if (r) 4405 return r; 4406 } 4407 4408 /* set up the compute queues - allocate horizontally across pipes */ 4409 ring_id = 0; 4410 for (i = 0; i < adev->gfx.mec.num_mec; ++i) { 4411 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { 4412 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { 4413 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i, 4414 k, j)) 4415 continue; 4416 4417 r = gfx_v7_0_compute_ring_init(adev, 4418 ring_id, 4419 i, k, j); 4420 if (r) 4421 return r; 4422 4423 ring_id++; 4424 } 4425 } 4426 } 4427 4428 adev->gfx.ce_ram_size = 0x8000; 4429 4430 gfx_v7_0_gpu_early_init(adev); 4431 4432 adev->gfx.gfx_supported_reset = 4433 amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 4434 adev->gfx.compute_supported_reset = 4435 amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); 4436 4437 return r; 4438 } 4439 4440 static int gfx_v7_0_sw_fini(struct amdgpu_ip_block *ip_block) 4441 { 4442 struct amdgpu_device *adev = ip_block->adev; 4443 int i; 4444 4445 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4446 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); 4447 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4448 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 4449 4450 gfx_v7_0_cp_compute_fini(adev); 4451 amdgpu_gfx_rlc_fini(adev); 4452 gfx_v7_0_mec_fini(adev); 4453 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, 4454 &adev->gfx.rlc.clear_state_gpu_addr, 4455 (void **)&adev->gfx.rlc.cs_ptr); 4456 if (adev->gfx.rlc.cp_table_size) { 4457 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, 4458 &adev->gfx.rlc.cp_table_gpu_addr, 4459 (void **)&adev->gfx.rlc.cp_table_ptr); 4460 } 4461 gfx_v7_0_free_microcode(adev); 4462 4463 return 0; 4464 } 4465 4466 static int gfx_v7_0_hw_init(struct amdgpu_ip_block *ip_block) 4467 { 4468 int r; 4469 struct amdgpu_device *adev = ip_block->adev; 4470 4471 gfx_v7_0_constants_init(adev); 4472 4473 /* init CSB */ 4474 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr); 4475 /* init rlc */ 4476 r = adev->gfx.rlc.funcs->resume(adev); 4477 if (r) 4478 return r; 4479 4480 r = gfx_v7_0_cp_resume(adev); 4481 if (r) 4482 return r; 4483 4484 return r; 4485 } 4486 4487 static int gfx_v7_0_hw_fini(struct amdgpu_ip_block *ip_block) 4488 { 4489 struct amdgpu_device *adev = ip_block->adev; 4490 4491 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); 4492 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); 4493 gfx_v7_0_cp_enable(adev, false); 4494 adev->gfx.rlc.funcs->stop(adev); 4495 gfx_v7_0_fini_pg(adev); 4496 4497 return 0; 4498 } 4499 4500 static int gfx_v7_0_suspend(struct amdgpu_ip_block 
*ip_block) 4501 { 4502 return gfx_v7_0_hw_fini(ip_block); 4503 } 4504 4505 static int gfx_v7_0_resume(struct amdgpu_ip_block *ip_block) 4506 { 4507 return gfx_v7_0_hw_init(ip_block); 4508 } 4509 4510 static bool gfx_v7_0_is_idle(struct amdgpu_ip_block *ip_block) 4511 { 4512 struct amdgpu_device *adev = ip_block->adev; 4513 4514 if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK) 4515 return false; 4516 else 4517 return true; 4518 } 4519 4520 static int gfx_v7_0_wait_for_idle(struct amdgpu_ip_block *ip_block) 4521 { 4522 unsigned i; 4523 u32 tmp; 4524 struct amdgpu_device *adev = ip_block->adev; 4525 4526 for (i = 0; i < adev->usec_timeout; i++) { 4527 /* read MC_STATUS */ 4528 tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; 4529 4530 if (!tmp) 4531 return 0; 4532 udelay(1); 4533 } 4534 return -ETIMEDOUT; 4535 } 4536 4537 static int gfx_v7_0_soft_reset(struct amdgpu_ip_block *ip_block) 4538 { 4539 u32 grbm_soft_reset = 0, srbm_soft_reset = 0; 4540 u32 tmp; 4541 struct amdgpu_device *adev = ip_block->adev; 4542 4543 /* GRBM_STATUS */ 4544 tmp = RREG32(mmGRBM_STATUS); 4545 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | 4546 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | 4547 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | 4548 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | 4549 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | 4550 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) 4551 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK | 4552 GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK; 4553 4554 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { 4555 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK; 4556 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; 4557 } 4558 4559 /* GRBM_STATUS2 */ 4560 tmp = RREG32(mmGRBM_STATUS2); 4561 if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) 4562 grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 4563 4564 /* SRBM_STATUS */ 4565 tmp = RREG32(mmSRBM_STATUS); 4566 if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) 4567 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK; 4568 4569 if (grbm_soft_reset || srbm_soft_reset) { 4570 /* disable CG/PG */ 4571 gfx_v7_0_fini_pg(adev); 4572 gfx_v7_0_update_cg(adev, false); 4573 4574 /* stop the rlc */ 4575 adev->gfx.rlc.funcs->stop(adev); 4576 4577 /* Disable GFX parsing/prefetching */ 4578 WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); 4579 4580 /* Disable MEC parsing/prefetching */ 4581 WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK); 4582 4583 if (grbm_soft_reset) { 4584 tmp = RREG32(mmGRBM_SOFT_RESET); 4585 tmp |= grbm_soft_reset; 4586 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); 4587 WREG32(mmGRBM_SOFT_RESET, tmp); 4588 tmp = RREG32(mmGRBM_SOFT_RESET); 4589 4590 udelay(50); 4591 4592 tmp &= ~grbm_soft_reset; 4593 WREG32(mmGRBM_SOFT_RESET, tmp); 4594 tmp = RREG32(mmGRBM_SOFT_RESET); 4595 } 4596 4597 if (srbm_soft_reset) { 4598 tmp = RREG32(mmSRBM_SOFT_RESET); 4599 tmp |= srbm_soft_reset; 4600 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 4601 WREG32(mmSRBM_SOFT_RESET, tmp); 4602 tmp = RREG32(mmSRBM_SOFT_RESET); 4603 4604 udelay(50); 4605 4606 tmp &= ~srbm_soft_reset; 4607 WREG32(mmSRBM_SOFT_RESET, tmp); 4608 tmp = RREG32(mmSRBM_SOFT_RESET); 4609 } 4610 /* Wait a little for things to settle down */ 4611 udelay(50); 4612 } 4613 return 0; 4614 } 4615 4616 static void 
gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, 4617 enum amdgpu_interrupt_state state) 4618 { 4619 u32 cp_int_cntl; 4620 4621 switch (state) { 4622 case AMDGPU_IRQ_STATE_DISABLE: 4623 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4624 cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; 4625 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4626 break; 4627 case AMDGPU_IRQ_STATE_ENABLE: 4628 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4629 cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; 4630 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4631 break; 4632 default: 4633 break; 4634 } 4635 } 4636 4637 static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, 4638 int me, int pipe, 4639 enum amdgpu_interrupt_state state) 4640 { 4641 u32 mec_int_cntl, mec_int_cntl_reg; 4642 4643 /* 4644 * amdgpu controls only the first MEC. That's why this function only 4645 * handles the setting of interrupts for this specific MEC. All other 4646 * pipes' interrupts are set by amdkfd. 4647 */ 4648 4649 if (me == 1) { 4650 switch (pipe) { 4651 case 0: 4652 mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; 4653 break; 4654 case 1: 4655 mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL; 4656 break; 4657 case 2: 4658 mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL; 4659 break; 4660 case 3: 4661 mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL; 4662 break; 4663 default: 4664 DRM_DEBUG("invalid pipe %d\n", pipe); 4665 return; 4666 } 4667 } else { 4668 DRM_DEBUG("invalid me %d\n", me); 4669 return; 4670 } 4671 4672 switch (state) { 4673 case AMDGPU_IRQ_STATE_DISABLE: 4674 mec_int_cntl = RREG32(mec_int_cntl_reg); 4675 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; 4676 WREG32(mec_int_cntl_reg, mec_int_cntl); 4677 break; 4678 case AMDGPU_IRQ_STATE_ENABLE: 4679 mec_int_cntl = RREG32(mec_int_cntl_reg); 4680 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK; 4681 WREG32(mec_int_cntl_reg, mec_int_cntl); 4682 break; 4683 default: 4684 break; 4685 } 4686 } 4687 4688 static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev, 4689 struct amdgpu_irq_src *src, 4690 unsigned type, 4691 enum amdgpu_interrupt_state state) 4692 { 4693 u32 cp_int_cntl; 4694 4695 switch (state) { 4696 case AMDGPU_IRQ_STATE_DISABLE: 4697 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4698 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; 4699 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4700 break; 4701 case AMDGPU_IRQ_STATE_ENABLE: 4702 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4703 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK; 4704 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4705 break; 4706 default: 4707 break; 4708 } 4709 4710 return 0; 4711 } 4712 4713 static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev, 4714 struct amdgpu_irq_src *src, 4715 unsigned type, 4716 enum amdgpu_interrupt_state state) 4717 { 4718 u32 cp_int_cntl; 4719 4720 switch (state) { 4721 case AMDGPU_IRQ_STATE_DISABLE: 4722 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4723 cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; 4724 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4725 break; 4726 case AMDGPU_IRQ_STATE_ENABLE: 4727 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4728 cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK; 4729 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4730 break; 4731 default: 4732 break; 4733 } 4734 4735 return 0; 4736 } 4737 4738 static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev, 4739 struct amdgpu_irq_src *src, 4740 
unsigned type, 4741 enum amdgpu_interrupt_state state) 4742 { 4743 switch (type) { 4744 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP: 4745 gfx_v7_0_set_gfx_eop_interrupt_state(adev, state); 4746 break; 4747 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: 4748 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state); 4749 break; 4750 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: 4751 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state); 4752 break; 4753 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: 4754 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state); 4755 break; 4756 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: 4757 gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state); 4758 break; 4759 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: 4760 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state); 4761 break; 4762 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: 4763 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state); 4764 break; 4765 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: 4766 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state); 4767 break; 4768 case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: 4769 gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state); 4770 break; 4771 default: 4772 break; 4773 } 4774 return 0; 4775 } 4776 4777 static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, 4778 struct amdgpu_irq_src *source, 4779 struct amdgpu_iv_entry *entry) 4780 { 4781 u8 me_id, pipe_id; 4782 struct amdgpu_ring *ring; 4783 int i; 4784 4785 DRM_DEBUG("IH: CP EOP\n"); 4786 me_id = (entry->ring_id & 0x0c) >> 2; 4787 pipe_id = (entry->ring_id & 0x03) >> 0; 4788 switch (me_id) { 4789 case 0: 4790 amdgpu_fence_process(&adev->gfx.gfx_ring[0]); 4791 break; 4792 case 1: 4793 case 2: 4794 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4795 ring = &adev->gfx.compute_ring[i]; 4796 if ((ring->me == me_id) && (ring->pipe == pipe_id)) 4797 amdgpu_fence_process(ring); 4798 } 4799 break; 4800 } 4801 return 0; 4802 } 4803 4804 static void gfx_v7_0_fault(struct amdgpu_device *adev, 4805 struct amdgpu_iv_entry *entry) 4806 { 4807 struct amdgpu_ring *ring; 4808 u8 me_id, pipe_id; 4809 int i; 4810 4811 me_id = (entry->ring_id & 0x0c) >> 2; 4812 pipe_id = (entry->ring_id & 0x03) >> 0; 4813 switch (me_id) { 4814 case 0: 4815 drm_sched_fault(&adev->gfx.gfx_ring[0].sched); 4816 break; 4817 case 1: 4818 case 2: 4819 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4820 ring = &adev->gfx.compute_ring[i]; 4821 if ((ring->me == me_id) && (ring->pipe == pipe_id)) 4822 drm_sched_fault(&ring->sched); 4823 } 4824 break; 4825 } 4826 } 4827 4828 static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, 4829 struct amdgpu_irq_src *source, 4830 struct amdgpu_iv_entry *entry) 4831 { 4832 DRM_ERROR("Illegal register access in command stream\n"); 4833 gfx_v7_0_fault(adev, entry); 4834 return 0; 4835 } 4836 4837 static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, 4838 struct amdgpu_irq_src *source, 4839 struct amdgpu_iv_entry *entry) 4840 { 4841 DRM_ERROR("Illegal instruction in command stream\n"); 4842 // XXX soft reset the gfx block only 4843 gfx_v7_0_fault(adev, entry); 4844 return 0; 4845 } 4846 4847 static int gfx_v7_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 4848 enum amd_clockgating_state state) 4849 { 4850 bool gate = false; 4851 struct amdgpu_device *adev = ip_block->adev; 4852 4853 if (state == AMD_CG_STATE_GATE) 4854 gate = true; 4855 4856 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 4857 /* order matters! 
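 * same ordering as gfx_v7_0_update_cg: MGCG comes up first, CGCG goes down first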
*/ 4858 if (gate) { 4859 gfx_v7_0_enable_mgcg(adev, true); 4860 gfx_v7_0_enable_cgcg(adev, true); 4861 } else { 4862 gfx_v7_0_enable_cgcg(adev, false); 4863 gfx_v7_0_enable_mgcg(adev, false); 4864 } 4865 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 4866 4867 return 0; 4868 } 4869 4870 static int gfx_v7_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 4871 enum amd_powergating_state state) 4872 { 4873 bool gate = false; 4874 struct amdgpu_device *adev = ip_block->adev; 4875 4876 if (state == AMD_PG_STATE_GATE) 4877 gate = true; 4878 4879 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 4880 AMD_PG_SUPPORT_GFX_SMG | 4881 AMD_PG_SUPPORT_GFX_DMG | 4882 AMD_PG_SUPPORT_CP | 4883 AMD_PG_SUPPORT_GDS | 4884 AMD_PG_SUPPORT_RLC_SMU_HS)) { 4885 gfx_v7_0_update_gfx_pg(adev, gate); 4886 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { 4887 gfx_v7_0_enable_cp_pg(adev, gate); 4888 gfx_v7_0_enable_gds_pg(adev, gate); 4889 } 4890 } 4891 4892 return 0; 4893 } 4894 4895 static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring) 4896 { 4897 amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 4898 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 4899 PACKET3_TC_ACTION_ENA | 4900 PACKET3_SH_KCACHE_ACTION_ENA | 4901 PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */ 4902 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 4903 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 4904 amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ 4905 } 4906 4907 static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) 4908 { 4909 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5)); 4910 amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 4911 PACKET3_TC_ACTION_ENA | 4912 PACKET3_SH_KCACHE_ACTION_ENA | 4913 PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */ 4914 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */ 4915 amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */ 4916 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */ 4917 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */ 4918 amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ 4919 } 4920 4921 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { 4922 .name = "gfx_v7_0", 4923 .early_init = gfx_v7_0_early_init, 4924 .late_init = gfx_v7_0_late_init, 4925 .sw_init = gfx_v7_0_sw_init, 4926 .sw_fini = gfx_v7_0_sw_fini, 4927 .hw_init = gfx_v7_0_hw_init, 4928 .hw_fini = gfx_v7_0_hw_fini, 4929 .suspend = gfx_v7_0_suspend, 4930 .resume = gfx_v7_0_resume, 4931 .is_idle = gfx_v7_0_is_idle, 4932 .wait_for_idle = gfx_v7_0_wait_for_idle, 4933 .soft_reset = gfx_v7_0_soft_reset, 4934 .set_clockgating_state = gfx_v7_0_set_clockgating_state, 4935 .set_powergating_state = gfx_v7_0_set_powergating_state, 4936 }; 4937 4938 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { 4939 .type = AMDGPU_RING_TYPE_GFX, 4940 .align_mask = 0xff, 4941 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4942 .support_64bit_ptrs = false, 4943 .get_rptr = gfx_v7_0_ring_get_rptr, 4944 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 4945 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 4946 .emit_frame_size = 4947 20 + /* gfx_v7_0_ring_emit_gds_switch */ 4948 7 + /* gfx_v7_0_ring_emit_hdp_flush */ 4949 5 + /* hdp invalidate */ 4950 12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */ 4951 7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */ 4952 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */ 4953 3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/ 4954 5, /* SURFACE_SYNC */ 4955 .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx 
*/ 4956 .emit_ib = gfx_v7_0_ring_emit_ib_gfx, 4957 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 4958 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, 4959 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 4960 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, 4961 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 4962 .test_ring = gfx_v7_0_ring_test_ring, 4963 .test_ib = gfx_v7_0_ring_test_ib, 4964 .insert_nop = amdgpu_ring_insert_nop, 4965 .pad_ib = amdgpu_ring_generic_pad_ib, 4966 .emit_cntxcntl = gfx_v7_ring_emit_cntxcntl, 4967 .emit_wreg = gfx_v7_0_ring_emit_wreg, 4968 .soft_recovery = gfx_v7_0_ring_soft_recovery, 4969 .emit_mem_sync = gfx_v7_0_emit_mem_sync, 4970 }; 4971 4972 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { 4973 .type = AMDGPU_RING_TYPE_COMPUTE, 4974 .align_mask = 0xff, 4975 .nop = PACKET3(PACKET3_NOP, 0x3FFF), 4976 .support_64bit_ptrs = false, 4977 .get_rptr = gfx_v7_0_ring_get_rptr, 4978 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 4979 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 4980 .emit_frame_size = 4981 20 + /* gfx_v7_0_ring_emit_gds_switch */ 4982 7 + /* gfx_v7_0_ring_emit_hdp_flush */ 4983 5 + /* hdp invalidate */ 4984 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ 4985 CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */ 4986 7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ 4987 7, /* gfx_v7_0_emit_mem_sync_compute */ 4988 .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */ 4989 .emit_ib = gfx_v7_0_ring_emit_ib_compute, 4990 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 4991 .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, 4992 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 4993 .emit_gds_switch = gfx_v7_0_ring_emit_gds_switch, 4994 .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush, 4995 .test_ring = gfx_v7_0_ring_test_ring, 4996 .test_ib = gfx_v7_0_ring_test_ib, 4997 .insert_nop = amdgpu_ring_insert_nop, 4998 .pad_ib = amdgpu_ring_generic_pad_ib, 4999 .emit_wreg = gfx_v7_0_ring_emit_wreg, 5000 .soft_recovery = gfx_v7_0_ring_soft_recovery, 5001 .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute, 5002 }; 5003 5004 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev) 5005 { 5006 int i; 5007 5008 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 5009 adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx; 5010 for (i = 0; i < adev->gfx.num_compute_rings; i++) 5011 adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute; 5012 } 5013 5014 static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = { 5015 .set = gfx_v7_0_set_eop_interrupt_state, 5016 .process = gfx_v7_0_eop_irq, 5017 }; 5018 5019 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = { 5020 .set = gfx_v7_0_set_priv_reg_fault_state, 5021 .process = gfx_v7_0_priv_reg_irq, 5022 }; 5023 5024 static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = { 5025 .set = gfx_v7_0_set_priv_inst_fault_state, 5026 .process = gfx_v7_0_priv_inst_irq, 5027 }; 5028 5029 static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev) 5030 { 5031 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; 5032 adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs; 5033 5034 adev->gfx.priv_reg_irq.num_types = 1; 5035 adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs; 5036 5037 adev->gfx.priv_inst_irq.num_types = 1; 5038 adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs; 5039 } 5040 5041 static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) 5042 { 5043 /* init asci gds info */ 5044 
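	/* the GDS size is read back from GDS_VMID0_SIZE; the GWS and OA
	 * entry counts below are hardcoded for this generation
	 */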
adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE); 5045 adev->gds.gws_size = 64; 5046 adev->gds.oa_size = 16; 5047 adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID); 5048 } 5049 5050 5051 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) 5052 { 5053 int i, j, k, counter, active_cu_number = 0; 5054 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 5055 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; 5056 unsigned disable_masks[4 * 2]; 5057 u32 ao_cu_num; 5058 5059 if (adev->flags & AMD_IS_APU) 5060 ao_cu_num = 2; 5061 else 5062 ao_cu_num = adev->gfx.config.max_cu_per_sh; 5063 5064 memset(cu_info, 0, sizeof(*cu_info)); 5065 5066 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); 5067 5068 mutex_lock(&adev->grbm_idx_mutex); 5069 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 5070 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 5071 mask = 1; 5072 ao_bitmap = 0; 5073 counter = 0; 5074 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 5075 if (i < 4 && j < 2) 5076 gfx_v7_0_set_user_cu_inactive_bitmap( 5077 adev, disable_masks[i * 2 + j]); 5078 bitmap = gfx_v7_0_get_cu_active_bitmap(adev); 5079 cu_info->bitmap[0][i][j] = bitmap; 5080 5081 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { 5082 if (bitmap & mask) { 5083 if (counter < ao_cu_num) 5084 ao_bitmap |= mask; 5085 counter++; 5086 } 5087 mask <<= 1; 5088 } 5089 active_cu_number += counter; 5090 if (i < 2 && j < 2) 5091 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 5092 cu_info->ao_cu_bitmap[i][j] = ao_bitmap; 5093 } 5094 } 5095 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 5096 mutex_unlock(&adev->grbm_idx_mutex); 5097 5098 cu_info->number = active_cu_number; 5099 cu_info->ao_cu_mask = ao_cu_mask; 5100 cu_info->simd_per_cu = NUM_SIMD_PER_CU; 5101 cu_info->max_waves_per_simd = 10; 5102 cu_info->max_scratch_slots_per_cu = 32; 5103 cu_info->wave_front_size = 64; 5104 cu_info->lds_size = 64; 5105 } 5106 5107 const struct amdgpu_ip_block_version gfx_v7_1_ip_block = { 5108 .type = AMD_IP_BLOCK_TYPE_GFX, 5109 .major = 7, 5110 .minor = 1, 5111 .rev = 0, 5112 .funcs = &gfx_v7_0_ip_funcs, 5113 }; 5114 5115 const struct amdgpu_ip_block_version gfx_v7_2_ip_block = { 5116 .type = AMD_IP_BLOCK_TYPE_GFX, 5117 .major = 7, 5118 .minor = 2, 5119 .rev = 0, 5120 .funcs = &gfx_v7_0_ip_funcs, 5121 }; 5122 5123 const struct amdgpu_ip_block_version gfx_v7_3_ip_block = { 5124 .type = AMD_IP_BLOCK_TYPE_GFX, 5125 .major = 7, 5126 .minor = 3, 5127 .rev = 0, 5128 .funcs = &gfx_v7_0_ip_funcs, 5129 }; 5130