/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "cikd.h"
#include "cik.h"
#include "cik_structs.h"
#include "atom.h"
#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */

#define GFX7_NUM_GFX_RINGS	1
#define GFX7_MEC_HPD_SIZE	2048

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");

MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");

MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");

MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
MODULE_FIRMWARE("amdgpu/kabini_me.bin");
MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
MODULE_FIRMWARE("amdgpu/kabini_mec.bin");

MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
MODULE_FIRMWARE("amdgpu/mullins_me.bin");
MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
MODULE_FIRMWARE("amdgpu/mullins_mec.bin");

static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = {
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE,
mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, 96 {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, 97 {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, 98 {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, 99 {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, 100 {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, 101 {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, 102 {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, 103 {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, 104 {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, 105 {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, 106 {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, 107 {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, 108 {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, 109 {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} 110 }; 111 112 static const u32 spectre_rlc_save_restore_register_list[] = { 113 (0x0e00 << 16) | (0xc12c >> 2), 114 0x00000000, 115 (0x0e00 << 16) | (0xc140 >> 2), 116 0x00000000, 117 (0x0e00 << 16) | (0xc150 >> 2), 118 0x00000000, 119 (0x0e00 << 16) | (0xc15c >> 2), 120 0x00000000, 121 (0x0e00 << 16) | (0xc168 >> 2), 122 0x00000000, 123 (0x0e00 << 16) | (0xc170 >> 2), 124 0x00000000, 125 (0x0e00 << 16) | (0xc178 >> 2), 126 0x00000000, 127 (0x0e00 << 16) | (0xc204 >> 2), 128 0x00000000, 129 (0x0e00 << 16) | (0xc2b4 >> 2), 130 0x00000000, 131 (0x0e00 << 16) | (0xc2b8 >> 2), 132 0x00000000, 133 (0x0e00 << 16) | (0xc2bc >> 2), 134 0x00000000, 135 (0x0e00 << 16) | (0xc2c0 >> 2), 136 0x00000000, 137 (0x0e00 << 16) | (0x8228 >> 2), 138 0x00000000, 139 (0x0e00 << 16) | (0x829c >> 2), 140 0x00000000, 141 (0x0e00 << 16) | (0x869c >> 2), 142 0x00000000, 143 (0x0600 << 16) | (0x98f4 >> 2), 144 0x00000000, 145 (0x0e00 << 16) | (0x98f8 >> 2), 146 0x00000000, 147 (0x0e00 << 16) | (0x9900 >> 2), 148 0x00000000, 149 (0x0e00 << 16) | (0xc260 >> 2), 150 0x00000000, 151 (0x0e00 << 16) | (0x90e8 >> 2), 152 0x00000000, 153 (0x0e00 << 16) | (0x3c000 >> 2), 154 0x00000000, 155 (0x0e00 << 16) | (0x3c00c >> 2), 156 0x00000000, 157 (0x0e00 << 16) | (0x8c1c >> 2), 158 0x00000000, 159 (0x0e00 << 16) | (0x9700 >> 2), 160 0x00000000, 161 (0x0e00 << 16) | (0xcd20 >> 2), 162 0x00000000, 163 (0x4e00 << 16) | (0xcd20 >> 2), 164 0x00000000, 165 (0x5e00 << 16) | (0xcd20 >> 2), 166 0x00000000, 167 (0x6e00 << 16) | (0xcd20 >> 2), 168 0x00000000, 169 (0x7e00 << 16) | (0xcd20 >> 2), 170 0x00000000, 171 (0x8e00 << 16) | (0xcd20 >> 2), 172 0x00000000, 173 (0x9e00 << 16) | (0xcd20 >> 2), 174 0x00000000, 175 (0xae00 << 16) | (0xcd20 >> 2), 176 0x00000000, 177 (0xbe00 << 16) | (0xcd20 >> 2), 178 0x00000000, 179 (0x0e00 << 16) | (0x89bc >> 2), 180 0x00000000, 181 (0x0e00 << 16) | (0x8900 >> 2), 182 0x00000000, 183 0x3, 184 (0x0e00 << 16) | (0xc130 >> 2), 185 0x00000000, 186 (0x0e00 << 16) | (0xc134 >> 2), 187 0x00000000, 188 (0x0e00 << 16) | (0xc1fc >> 2), 189 0x00000000, 190 (0x0e00 << 16) | (0xc208 >> 2), 191 0x00000000, 192 (0x0e00 << 16) | (0xc264 >> 2), 193 0x00000000, 194 (0x0e00 << 16) | (0xc268 >> 2), 195 0x00000000, 196 (0x0e00 << 16) | (0xc26c >> 2), 197 0x00000000, 198 (0x0e00 << 16) | (0xc270 >> 2), 199 0x00000000, 200 (0x0e00 << 16) | (0xc274 >> 2), 201 0x00000000, 202 (0x0e00 << 16) | (0xc278 >> 2), 203 0x00000000, 204 
(0x0e00 << 16) | (0xc27c >> 2), 205 0x00000000, 206 (0x0e00 << 16) | (0xc280 >> 2), 207 0x00000000, 208 (0x0e00 << 16) | (0xc284 >> 2), 209 0x00000000, 210 (0x0e00 << 16) | (0xc288 >> 2), 211 0x00000000, 212 (0x0e00 << 16) | (0xc28c >> 2), 213 0x00000000, 214 (0x0e00 << 16) | (0xc290 >> 2), 215 0x00000000, 216 (0x0e00 << 16) | (0xc294 >> 2), 217 0x00000000, 218 (0x0e00 << 16) | (0xc298 >> 2), 219 0x00000000, 220 (0x0e00 << 16) | (0xc29c >> 2), 221 0x00000000, 222 (0x0e00 << 16) | (0xc2a0 >> 2), 223 0x00000000, 224 (0x0e00 << 16) | (0xc2a4 >> 2), 225 0x00000000, 226 (0x0e00 << 16) | (0xc2a8 >> 2), 227 0x00000000, 228 (0x0e00 << 16) | (0xc2ac >> 2), 229 0x00000000, 230 (0x0e00 << 16) | (0xc2b0 >> 2), 231 0x00000000, 232 (0x0e00 << 16) | (0x301d0 >> 2), 233 0x00000000, 234 (0x0e00 << 16) | (0x30238 >> 2), 235 0x00000000, 236 (0x0e00 << 16) | (0x30250 >> 2), 237 0x00000000, 238 (0x0e00 << 16) | (0x30254 >> 2), 239 0x00000000, 240 (0x0e00 << 16) | (0x30258 >> 2), 241 0x00000000, 242 (0x0e00 << 16) | (0x3025c >> 2), 243 0x00000000, 244 (0x4e00 << 16) | (0xc900 >> 2), 245 0x00000000, 246 (0x5e00 << 16) | (0xc900 >> 2), 247 0x00000000, 248 (0x6e00 << 16) | (0xc900 >> 2), 249 0x00000000, 250 (0x7e00 << 16) | (0xc900 >> 2), 251 0x00000000, 252 (0x8e00 << 16) | (0xc900 >> 2), 253 0x00000000, 254 (0x9e00 << 16) | (0xc900 >> 2), 255 0x00000000, 256 (0xae00 << 16) | (0xc900 >> 2), 257 0x00000000, 258 (0xbe00 << 16) | (0xc900 >> 2), 259 0x00000000, 260 (0x4e00 << 16) | (0xc904 >> 2), 261 0x00000000, 262 (0x5e00 << 16) | (0xc904 >> 2), 263 0x00000000, 264 (0x6e00 << 16) | (0xc904 >> 2), 265 0x00000000, 266 (0x7e00 << 16) | (0xc904 >> 2), 267 0x00000000, 268 (0x8e00 << 16) | (0xc904 >> 2), 269 0x00000000, 270 (0x9e00 << 16) | (0xc904 >> 2), 271 0x00000000, 272 (0xae00 << 16) | (0xc904 >> 2), 273 0x00000000, 274 (0xbe00 << 16) | (0xc904 >> 2), 275 0x00000000, 276 (0x4e00 << 16) | (0xc908 >> 2), 277 0x00000000, 278 (0x5e00 << 16) | (0xc908 >> 2), 279 0x00000000, 280 (0x6e00 << 16) | (0xc908 >> 2), 281 0x00000000, 282 (0x7e00 << 16) | (0xc908 >> 2), 283 0x00000000, 284 (0x8e00 << 16) | (0xc908 >> 2), 285 0x00000000, 286 (0x9e00 << 16) | (0xc908 >> 2), 287 0x00000000, 288 (0xae00 << 16) | (0xc908 >> 2), 289 0x00000000, 290 (0xbe00 << 16) | (0xc908 >> 2), 291 0x00000000, 292 (0x4e00 << 16) | (0xc90c >> 2), 293 0x00000000, 294 (0x5e00 << 16) | (0xc90c >> 2), 295 0x00000000, 296 (0x6e00 << 16) | (0xc90c >> 2), 297 0x00000000, 298 (0x7e00 << 16) | (0xc90c >> 2), 299 0x00000000, 300 (0x8e00 << 16) | (0xc90c >> 2), 301 0x00000000, 302 (0x9e00 << 16) | (0xc90c >> 2), 303 0x00000000, 304 (0xae00 << 16) | (0xc90c >> 2), 305 0x00000000, 306 (0xbe00 << 16) | (0xc90c >> 2), 307 0x00000000, 308 (0x4e00 << 16) | (0xc910 >> 2), 309 0x00000000, 310 (0x5e00 << 16) | (0xc910 >> 2), 311 0x00000000, 312 (0x6e00 << 16) | (0xc910 >> 2), 313 0x00000000, 314 (0x7e00 << 16) | (0xc910 >> 2), 315 0x00000000, 316 (0x8e00 << 16) | (0xc910 >> 2), 317 0x00000000, 318 (0x9e00 << 16) | (0xc910 >> 2), 319 0x00000000, 320 (0xae00 << 16) | (0xc910 >> 2), 321 0x00000000, 322 (0xbe00 << 16) | (0xc910 >> 2), 323 0x00000000, 324 (0x0e00 << 16) | (0xc99c >> 2), 325 0x00000000, 326 (0x0e00 << 16) | (0x9834 >> 2), 327 0x00000000, 328 (0x0000 << 16) | (0x30f00 >> 2), 329 0x00000000, 330 (0x0001 << 16) | (0x30f00 >> 2), 331 0x00000000, 332 (0x0000 << 16) | (0x30f04 >> 2), 333 0x00000000, 334 (0x0001 << 16) | (0x30f04 >> 2), 335 0x00000000, 336 (0x0000 << 16) | (0x30f08 >> 2), 337 0x00000000, 338 (0x0001 << 16) | (0x30f08 >> 2), 339 0x00000000, 340 
(0x0000 << 16) | (0x30f0c >> 2), 341 0x00000000, 342 (0x0001 << 16) | (0x30f0c >> 2), 343 0x00000000, 344 (0x0600 << 16) | (0x9b7c >> 2), 345 0x00000000, 346 (0x0e00 << 16) | (0x8a14 >> 2), 347 0x00000000, 348 (0x0e00 << 16) | (0x8a18 >> 2), 349 0x00000000, 350 (0x0600 << 16) | (0x30a00 >> 2), 351 0x00000000, 352 (0x0e00 << 16) | (0x8bf0 >> 2), 353 0x00000000, 354 (0x0e00 << 16) | (0x8bcc >> 2), 355 0x00000000, 356 (0x0e00 << 16) | (0x8b24 >> 2), 357 0x00000000, 358 (0x0e00 << 16) | (0x30a04 >> 2), 359 0x00000000, 360 (0x0600 << 16) | (0x30a10 >> 2), 361 0x00000000, 362 (0x0600 << 16) | (0x30a14 >> 2), 363 0x00000000, 364 (0x0600 << 16) | (0x30a18 >> 2), 365 0x00000000, 366 (0x0600 << 16) | (0x30a2c >> 2), 367 0x00000000, 368 (0x0e00 << 16) | (0xc700 >> 2), 369 0x00000000, 370 (0x0e00 << 16) | (0xc704 >> 2), 371 0x00000000, 372 (0x0e00 << 16) | (0xc708 >> 2), 373 0x00000000, 374 (0x0e00 << 16) | (0xc768 >> 2), 375 0x00000000, 376 (0x0400 << 16) | (0xc770 >> 2), 377 0x00000000, 378 (0x0400 << 16) | (0xc774 >> 2), 379 0x00000000, 380 (0x0400 << 16) | (0xc778 >> 2), 381 0x00000000, 382 (0x0400 << 16) | (0xc77c >> 2), 383 0x00000000, 384 (0x0400 << 16) | (0xc780 >> 2), 385 0x00000000, 386 (0x0400 << 16) | (0xc784 >> 2), 387 0x00000000, 388 (0x0400 << 16) | (0xc788 >> 2), 389 0x00000000, 390 (0x0400 << 16) | (0xc78c >> 2), 391 0x00000000, 392 (0x0400 << 16) | (0xc798 >> 2), 393 0x00000000, 394 (0x0400 << 16) | (0xc79c >> 2), 395 0x00000000, 396 (0x0400 << 16) | (0xc7a0 >> 2), 397 0x00000000, 398 (0x0400 << 16) | (0xc7a4 >> 2), 399 0x00000000, 400 (0x0400 << 16) | (0xc7a8 >> 2), 401 0x00000000, 402 (0x0400 << 16) | (0xc7ac >> 2), 403 0x00000000, 404 (0x0400 << 16) | (0xc7b0 >> 2), 405 0x00000000, 406 (0x0400 << 16) | (0xc7b4 >> 2), 407 0x00000000, 408 (0x0e00 << 16) | (0x9100 >> 2), 409 0x00000000, 410 (0x0e00 << 16) | (0x3c010 >> 2), 411 0x00000000, 412 (0x0e00 << 16) | (0x92a8 >> 2), 413 0x00000000, 414 (0x0e00 << 16) | (0x92ac >> 2), 415 0x00000000, 416 (0x0e00 << 16) | (0x92b4 >> 2), 417 0x00000000, 418 (0x0e00 << 16) | (0x92b8 >> 2), 419 0x00000000, 420 (0x0e00 << 16) | (0x92bc >> 2), 421 0x00000000, 422 (0x0e00 << 16) | (0x92c0 >> 2), 423 0x00000000, 424 (0x0e00 << 16) | (0x92c4 >> 2), 425 0x00000000, 426 (0x0e00 << 16) | (0x92c8 >> 2), 427 0x00000000, 428 (0x0e00 << 16) | (0x92cc >> 2), 429 0x00000000, 430 (0x0e00 << 16) | (0x92d0 >> 2), 431 0x00000000, 432 (0x0e00 << 16) | (0x8c00 >> 2), 433 0x00000000, 434 (0x0e00 << 16) | (0x8c04 >> 2), 435 0x00000000, 436 (0x0e00 << 16) | (0x8c20 >> 2), 437 0x00000000, 438 (0x0e00 << 16) | (0x8c38 >> 2), 439 0x00000000, 440 (0x0e00 << 16) | (0x8c3c >> 2), 441 0x00000000, 442 (0x0e00 << 16) | (0xae00 >> 2), 443 0x00000000, 444 (0x0e00 << 16) | (0x9604 >> 2), 445 0x00000000, 446 (0x0e00 << 16) | (0xac08 >> 2), 447 0x00000000, 448 (0x0e00 << 16) | (0xac0c >> 2), 449 0x00000000, 450 (0x0e00 << 16) | (0xac10 >> 2), 451 0x00000000, 452 (0x0e00 << 16) | (0xac14 >> 2), 453 0x00000000, 454 (0x0e00 << 16) | (0xac58 >> 2), 455 0x00000000, 456 (0x0e00 << 16) | (0xac68 >> 2), 457 0x00000000, 458 (0x0e00 << 16) | (0xac6c >> 2), 459 0x00000000, 460 (0x0e00 << 16) | (0xac70 >> 2), 461 0x00000000, 462 (0x0e00 << 16) | (0xac74 >> 2), 463 0x00000000, 464 (0x0e00 << 16) | (0xac78 >> 2), 465 0x00000000, 466 (0x0e00 << 16) | (0xac7c >> 2), 467 0x00000000, 468 (0x0e00 << 16) | (0xac80 >> 2), 469 0x00000000, 470 (0x0e00 << 16) | (0xac84 >> 2), 471 0x00000000, 472 (0x0e00 << 16) | (0xac88 >> 2), 473 0x00000000, 474 (0x0e00 << 16) | (0xac8c >> 2), 475 0x00000000, 476 (0x0e00 
<< 16) | (0x970c >> 2), 477 0x00000000, 478 (0x0e00 << 16) | (0x9714 >> 2), 479 0x00000000, 480 (0x0e00 << 16) | (0x9718 >> 2), 481 0x00000000, 482 (0x0e00 << 16) | (0x971c >> 2), 483 0x00000000, 484 (0x0e00 << 16) | (0x31068 >> 2), 485 0x00000000, 486 (0x4e00 << 16) | (0x31068 >> 2), 487 0x00000000, 488 (0x5e00 << 16) | (0x31068 >> 2), 489 0x00000000, 490 (0x6e00 << 16) | (0x31068 >> 2), 491 0x00000000, 492 (0x7e00 << 16) | (0x31068 >> 2), 493 0x00000000, 494 (0x8e00 << 16) | (0x31068 >> 2), 495 0x00000000, 496 (0x9e00 << 16) | (0x31068 >> 2), 497 0x00000000, 498 (0xae00 << 16) | (0x31068 >> 2), 499 0x00000000, 500 (0xbe00 << 16) | (0x31068 >> 2), 501 0x00000000, 502 (0x0e00 << 16) | (0xcd10 >> 2), 503 0x00000000, 504 (0x0e00 << 16) | (0xcd14 >> 2), 505 0x00000000, 506 (0x0e00 << 16) | (0x88b0 >> 2), 507 0x00000000, 508 (0x0e00 << 16) | (0x88b4 >> 2), 509 0x00000000, 510 (0x0e00 << 16) | (0x88b8 >> 2), 511 0x00000000, 512 (0x0e00 << 16) | (0x88bc >> 2), 513 0x00000000, 514 (0x0400 << 16) | (0x89c0 >> 2), 515 0x00000000, 516 (0x0e00 << 16) | (0x88c4 >> 2), 517 0x00000000, 518 (0x0e00 << 16) | (0x88c8 >> 2), 519 0x00000000, 520 (0x0e00 << 16) | (0x88d0 >> 2), 521 0x00000000, 522 (0x0e00 << 16) | (0x88d4 >> 2), 523 0x00000000, 524 (0x0e00 << 16) | (0x88d8 >> 2), 525 0x00000000, 526 (0x0e00 << 16) | (0x8980 >> 2), 527 0x00000000, 528 (0x0e00 << 16) | (0x30938 >> 2), 529 0x00000000, 530 (0x0e00 << 16) | (0x3093c >> 2), 531 0x00000000, 532 (0x0e00 << 16) | (0x30940 >> 2), 533 0x00000000, 534 (0x0e00 << 16) | (0x89a0 >> 2), 535 0x00000000, 536 (0x0e00 << 16) | (0x30900 >> 2), 537 0x00000000, 538 (0x0e00 << 16) | (0x30904 >> 2), 539 0x00000000, 540 (0x0e00 << 16) | (0x89b4 >> 2), 541 0x00000000, 542 (0x0e00 << 16) | (0x3c210 >> 2), 543 0x00000000, 544 (0x0e00 << 16) | (0x3c214 >> 2), 545 0x00000000, 546 (0x0e00 << 16) | (0x3c218 >> 2), 547 0x00000000, 548 (0x0e00 << 16) | (0x8904 >> 2), 549 0x00000000, 550 0x5, 551 (0x0e00 << 16) | (0x8c28 >> 2), 552 (0x0e00 << 16) | (0x8c2c >> 2), 553 (0x0e00 << 16) | (0x8c30 >> 2), 554 (0x0e00 << 16) | (0x8c34 >> 2), 555 (0x0e00 << 16) | (0x9600 >> 2), 556 }; 557 558 static const u32 kalindi_rlc_save_restore_register_list[] = { 559 (0x0e00 << 16) | (0xc12c >> 2), 560 0x00000000, 561 (0x0e00 << 16) | (0xc140 >> 2), 562 0x00000000, 563 (0x0e00 << 16) | (0xc150 >> 2), 564 0x00000000, 565 (0x0e00 << 16) | (0xc15c >> 2), 566 0x00000000, 567 (0x0e00 << 16) | (0xc168 >> 2), 568 0x00000000, 569 (0x0e00 << 16) | (0xc170 >> 2), 570 0x00000000, 571 (0x0e00 << 16) | (0xc204 >> 2), 572 0x00000000, 573 (0x0e00 << 16) | (0xc2b4 >> 2), 574 0x00000000, 575 (0x0e00 << 16) | (0xc2b8 >> 2), 576 0x00000000, 577 (0x0e00 << 16) | (0xc2bc >> 2), 578 0x00000000, 579 (0x0e00 << 16) | (0xc2c0 >> 2), 580 0x00000000, 581 (0x0e00 << 16) | (0x8228 >> 2), 582 0x00000000, 583 (0x0e00 << 16) | (0x829c >> 2), 584 0x00000000, 585 (0x0e00 << 16) | (0x869c >> 2), 586 0x00000000, 587 (0x0600 << 16) | (0x98f4 >> 2), 588 0x00000000, 589 (0x0e00 << 16) | (0x98f8 >> 2), 590 0x00000000, 591 (0x0e00 << 16) | (0x9900 >> 2), 592 0x00000000, 593 (0x0e00 << 16) | (0xc260 >> 2), 594 0x00000000, 595 (0x0e00 << 16) | (0x90e8 >> 2), 596 0x00000000, 597 (0x0e00 << 16) | (0x3c000 >> 2), 598 0x00000000, 599 (0x0e00 << 16) | (0x3c00c >> 2), 600 0x00000000, 601 (0x0e00 << 16) | (0x8c1c >> 2), 602 0x00000000, 603 (0x0e00 << 16) | (0x9700 >> 2), 604 0x00000000, 605 (0x0e00 << 16) | (0xcd20 >> 2), 606 0x00000000, 607 (0x4e00 << 16) | (0xcd20 >> 2), 608 0x00000000, 609 (0x5e00 << 16) | (0xcd20 >> 2), 610 0x00000000, 611 
(0x6e00 << 16) | (0xcd20 >> 2), 612 0x00000000, 613 (0x7e00 << 16) | (0xcd20 >> 2), 614 0x00000000, 615 (0x0e00 << 16) | (0x89bc >> 2), 616 0x00000000, 617 (0x0e00 << 16) | (0x8900 >> 2), 618 0x00000000, 619 0x3, 620 (0x0e00 << 16) | (0xc130 >> 2), 621 0x00000000, 622 (0x0e00 << 16) | (0xc134 >> 2), 623 0x00000000, 624 (0x0e00 << 16) | (0xc1fc >> 2), 625 0x00000000, 626 (0x0e00 << 16) | (0xc208 >> 2), 627 0x00000000, 628 (0x0e00 << 16) | (0xc264 >> 2), 629 0x00000000, 630 (0x0e00 << 16) | (0xc268 >> 2), 631 0x00000000, 632 (0x0e00 << 16) | (0xc26c >> 2), 633 0x00000000, 634 (0x0e00 << 16) | (0xc270 >> 2), 635 0x00000000, 636 (0x0e00 << 16) | (0xc274 >> 2), 637 0x00000000, 638 (0x0e00 << 16) | (0xc28c >> 2), 639 0x00000000, 640 (0x0e00 << 16) | (0xc290 >> 2), 641 0x00000000, 642 (0x0e00 << 16) | (0xc294 >> 2), 643 0x00000000, 644 (0x0e00 << 16) | (0xc298 >> 2), 645 0x00000000, 646 (0x0e00 << 16) | (0xc2a0 >> 2), 647 0x00000000, 648 (0x0e00 << 16) | (0xc2a4 >> 2), 649 0x00000000, 650 (0x0e00 << 16) | (0xc2a8 >> 2), 651 0x00000000, 652 (0x0e00 << 16) | (0xc2ac >> 2), 653 0x00000000, 654 (0x0e00 << 16) | (0x301d0 >> 2), 655 0x00000000, 656 (0x0e00 << 16) | (0x30238 >> 2), 657 0x00000000, 658 (0x0e00 << 16) | (0x30250 >> 2), 659 0x00000000, 660 (0x0e00 << 16) | (0x30254 >> 2), 661 0x00000000, 662 (0x0e00 << 16) | (0x30258 >> 2), 663 0x00000000, 664 (0x0e00 << 16) | (0x3025c >> 2), 665 0x00000000, 666 (0x4e00 << 16) | (0xc900 >> 2), 667 0x00000000, 668 (0x5e00 << 16) | (0xc900 >> 2), 669 0x00000000, 670 (0x6e00 << 16) | (0xc900 >> 2), 671 0x00000000, 672 (0x7e00 << 16) | (0xc900 >> 2), 673 0x00000000, 674 (0x4e00 << 16) | (0xc904 >> 2), 675 0x00000000, 676 (0x5e00 << 16) | (0xc904 >> 2), 677 0x00000000, 678 (0x6e00 << 16) | (0xc904 >> 2), 679 0x00000000, 680 (0x7e00 << 16) | (0xc904 >> 2), 681 0x00000000, 682 (0x4e00 << 16) | (0xc908 >> 2), 683 0x00000000, 684 (0x5e00 << 16) | (0xc908 >> 2), 685 0x00000000, 686 (0x6e00 << 16) | (0xc908 >> 2), 687 0x00000000, 688 (0x7e00 << 16) | (0xc908 >> 2), 689 0x00000000, 690 (0x4e00 << 16) | (0xc90c >> 2), 691 0x00000000, 692 (0x5e00 << 16) | (0xc90c >> 2), 693 0x00000000, 694 (0x6e00 << 16) | (0xc90c >> 2), 695 0x00000000, 696 (0x7e00 << 16) | (0xc90c >> 2), 697 0x00000000, 698 (0x4e00 << 16) | (0xc910 >> 2), 699 0x00000000, 700 (0x5e00 << 16) | (0xc910 >> 2), 701 0x00000000, 702 (0x6e00 << 16) | (0xc910 >> 2), 703 0x00000000, 704 (0x7e00 << 16) | (0xc910 >> 2), 705 0x00000000, 706 (0x0e00 << 16) | (0xc99c >> 2), 707 0x00000000, 708 (0x0e00 << 16) | (0x9834 >> 2), 709 0x00000000, 710 (0x0000 << 16) | (0x30f00 >> 2), 711 0x00000000, 712 (0x0000 << 16) | (0x30f04 >> 2), 713 0x00000000, 714 (0x0000 << 16) | (0x30f08 >> 2), 715 0x00000000, 716 (0x0000 << 16) | (0x30f0c >> 2), 717 0x00000000, 718 (0x0600 << 16) | (0x9b7c >> 2), 719 0x00000000, 720 (0x0e00 << 16) | (0x8a14 >> 2), 721 0x00000000, 722 (0x0e00 << 16) | (0x8a18 >> 2), 723 0x00000000, 724 (0x0600 << 16) | (0x30a00 >> 2), 725 0x00000000, 726 (0x0e00 << 16) | (0x8bf0 >> 2), 727 0x00000000, 728 (0x0e00 << 16) | (0x8bcc >> 2), 729 0x00000000, 730 (0x0e00 << 16) | (0x8b24 >> 2), 731 0x00000000, 732 (0x0e00 << 16) | (0x30a04 >> 2), 733 0x00000000, 734 (0x0600 << 16) | (0x30a10 >> 2), 735 0x00000000, 736 (0x0600 << 16) | (0x30a14 >> 2), 737 0x00000000, 738 (0x0600 << 16) | (0x30a18 >> 2), 739 0x00000000, 740 (0x0600 << 16) | (0x30a2c >> 2), 741 0x00000000, 742 (0x0e00 << 16) | (0xc700 >> 2), 743 0x00000000, 744 (0x0e00 << 16) | (0xc704 >> 2), 745 0x00000000, 746 (0x0e00 << 16) | (0xc708 >> 2), 747 
0x00000000, 748 (0x0e00 << 16) | (0xc768 >> 2), 749 0x00000000, 750 (0x0400 << 16) | (0xc770 >> 2), 751 0x00000000, 752 (0x0400 << 16) | (0xc774 >> 2), 753 0x00000000, 754 (0x0400 << 16) | (0xc798 >> 2), 755 0x00000000, 756 (0x0400 << 16) | (0xc79c >> 2), 757 0x00000000, 758 (0x0e00 << 16) | (0x9100 >> 2), 759 0x00000000, 760 (0x0e00 << 16) | (0x3c010 >> 2), 761 0x00000000, 762 (0x0e00 << 16) | (0x8c00 >> 2), 763 0x00000000, 764 (0x0e00 << 16) | (0x8c04 >> 2), 765 0x00000000, 766 (0x0e00 << 16) | (0x8c20 >> 2), 767 0x00000000, 768 (0x0e00 << 16) | (0x8c38 >> 2), 769 0x00000000, 770 (0x0e00 << 16) | (0x8c3c >> 2), 771 0x00000000, 772 (0x0e00 << 16) | (0xae00 >> 2), 773 0x00000000, 774 (0x0e00 << 16) | (0x9604 >> 2), 775 0x00000000, 776 (0x0e00 << 16) | (0xac08 >> 2), 777 0x00000000, 778 (0x0e00 << 16) | (0xac0c >> 2), 779 0x00000000, 780 (0x0e00 << 16) | (0xac10 >> 2), 781 0x00000000, 782 (0x0e00 << 16) | (0xac14 >> 2), 783 0x00000000, 784 (0x0e00 << 16) | (0xac58 >> 2), 785 0x00000000, 786 (0x0e00 << 16) | (0xac68 >> 2), 787 0x00000000, 788 (0x0e00 << 16) | (0xac6c >> 2), 789 0x00000000, 790 (0x0e00 << 16) | (0xac70 >> 2), 791 0x00000000, 792 (0x0e00 << 16) | (0xac74 >> 2), 793 0x00000000, 794 (0x0e00 << 16) | (0xac78 >> 2), 795 0x00000000, 796 (0x0e00 << 16) | (0xac7c >> 2), 797 0x00000000, 798 (0x0e00 << 16) | (0xac80 >> 2), 799 0x00000000, 800 (0x0e00 << 16) | (0xac84 >> 2), 801 0x00000000, 802 (0x0e00 << 16) | (0xac88 >> 2), 803 0x00000000, 804 (0x0e00 << 16) | (0xac8c >> 2), 805 0x00000000, 806 (0x0e00 << 16) | (0x970c >> 2), 807 0x00000000, 808 (0x0e00 << 16) | (0x9714 >> 2), 809 0x00000000, 810 (0x0e00 << 16) | (0x9718 >> 2), 811 0x00000000, 812 (0x0e00 << 16) | (0x971c >> 2), 813 0x00000000, 814 (0x0e00 << 16) | (0x31068 >> 2), 815 0x00000000, 816 (0x4e00 << 16) | (0x31068 >> 2), 817 0x00000000, 818 (0x5e00 << 16) | (0x31068 >> 2), 819 0x00000000, 820 (0x6e00 << 16) | (0x31068 >> 2), 821 0x00000000, 822 (0x7e00 << 16) | (0x31068 >> 2), 823 0x00000000, 824 (0x0e00 << 16) | (0xcd10 >> 2), 825 0x00000000, 826 (0x0e00 << 16) | (0xcd14 >> 2), 827 0x00000000, 828 (0x0e00 << 16) | (0x88b0 >> 2), 829 0x00000000, 830 (0x0e00 << 16) | (0x88b4 >> 2), 831 0x00000000, 832 (0x0e00 << 16) | (0x88b8 >> 2), 833 0x00000000, 834 (0x0e00 << 16) | (0x88bc >> 2), 835 0x00000000, 836 (0x0400 << 16) | (0x89c0 >> 2), 837 0x00000000, 838 (0x0e00 << 16) | (0x88c4 >> 2), 839 0x00000000, 840 (0x0e00 << 16) | (0x88c8 >> 2), 841 0x00000000, 842 (0x0e00 << 16) | (0x88d0 >> 2), 843 0x00000000, 844 (0x0e00 << 16) | (0x88d4 >> 2), 845 0x00000000, 846 (0x0e00 << 16) | (0x88d8 >> 2), 847 0x00000000, 848 (0x0e00 << 16) | (0x8980 >> 2), 849 0x00000000, 850 (0x0e00 << 16) | (0x30938 >> 2), 851 0x00000000, 852 (0x0e00 << 16) | (0x3093c >> 2), 853 0x00000000, 854 (0x0e00 << 16) | (0x30940 >> 2), 855 0x00000000, 856 (0x0e00 << 16) | (0x89a0 >> 2), 857 0x00000000, 858 (0x0e00 << 16) | (0x30900 >> 2), 859 0x00000000, 860 (0x0e00 << 16) | (0x30904 >> 2), 861 0x00000000, 862 (0x0e00 << 16) | (0x89b4 >> 2), 863 0x00000000, 864 (0x0e00 << 16) | (0x3e1fc >> 2), 865 0x00000000, 866 (0x0e00 << 16) | (0x3c210 >> 2), 867 0x00000000, 868 (0x0e00 << 16) | (0x3c214 >> 2), 869 0x00000000, 870 (0x0e00 << 16) | (0x3c218 >> 2), 871 0x00000000, 872 (0x0e00 << 16) | (0x8904 >> 2), 873 0x00000000, 874 0x5, 875 (0x0e00 << 16) | (0x8c28 >> 2), 876 (0x0e00 << 16) | (0x8c2c >> 2), 877 (0x0e00 << 16) | (0x8c30 >> 2), 878 (0x0e00 << 16) | (0x8c34 >> 2), 879 (0x0e00 << 16) | (0x9600 >> 2), 880 }; 881 882 static u32 gfx_v7_0_get_csb_size(struct 
amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);

static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
}

/*
 * Core functions
 */
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
		err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
out:
	if (err) {
		pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
		gfx_v7_0_free_microcode(adev);
	}
	return err;
}

/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
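 *
 * Added note (from the implementation below): the split-equal-to-row-size
 * value is derived from adev->gfx.config.mem_row_size_in_kb, the per-asic
 * tile modes are written to GB_TILE_MODE0..31, and the macro tile modes to
 * GB_MACROTILE_MODE0..15 (index 7 is skipped by the write loops).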
984 */ 985 static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) 986 { 987 const u32 num_tile_mode_states = 988 ARRAY_SIZE(adev->gfx.config.tile_mode_array); 989 const u32 num_secondary_tile_mode_states = 990 ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); 991 u32 reg_offset, split_equal_to_row_size; 992 uint32_t *tile, *macrotile; 993 994 tile = adev->gfx.config.tile_mode_array; 995 macrotile = adev->gfx.config.macrotile_mode_array; 996 997 switch (adev->gfx.config.mem_row_size_in_kb) { 998 case 1: 999 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; 1000 break; 1001 case 2: 1002 default: 1003 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; 1004 break; 1005 case 4: 1006 split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; 1007 break; 1008 } 1009 1010 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1011 tile[reg_offset] = 0; 1012 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1013 macrotile[reg_offset] = 0; 1014 1015 switch (adev->asic_type) { 1016 case CHIP_BONAIRE: 1017 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1018 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1019 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1020 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1021 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1022 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1023 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1024 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1025 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1026 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1027 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1028 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1029 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1030 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1031 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1032 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1033 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1034 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1035 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1036 TILE_SPLIT(split_equal_to_row_size)); 1037 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1038 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1039 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1040 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1041 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1042 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1043 TILE_SPLIT(split_equal_to_row_size)); 1044 tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1045 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1046 PIPE_CONFIG(ADDR_SURF_P4_16x16)); 1047 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1048 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1049 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1050 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1051 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1052 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1053 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1054 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1055 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1056 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1057 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1058 tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1059 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1060 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1061 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1062 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1063 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1064 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1065 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1066 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1067 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1068 
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1069 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1070 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1071 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1072 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1073 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1074 tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1075 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1076 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1077 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1078 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1079 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1080 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1081 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1082 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1083 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1084 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1085 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1086 tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1087 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1088 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1089 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1090 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1091 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1092 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1093 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1094 tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1095 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1096 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1097 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1098 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1099 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1100 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1101 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1102 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1103 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1104 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1105 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1106 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1107 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1108 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1109 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1110 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1111 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1112 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1113 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1114 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1115 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1116 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1117 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1118 tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1119 1120 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1121 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1122 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1123 NUM_BANKS(ADDR_SURF_16_BANK)); 1124 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1125 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1126 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1127 NUM_BANKS(ADDR_SURF_16_BANK)); 1128 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1129 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1130 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1131 NUM_BANKS(ADDR_SURF_16_BANK)); 1132 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1133 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1134 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1135 NUM_BANKS(ADDR_SURF_16_BANK)); 1136 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1137 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1138 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1139 NUM_BANKS(ADDR_SURF_16_BANK)); 1140 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1141 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 
1142 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1143 NUM_BANKS(ADDR_SURF_8_BANK)); 1144 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1145 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1146 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1147 NUM_BANKS(ADDR_SURF_4_BANK)); 1148 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1149 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1150 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1151 NUM_BANKS(ADDR_SURF_16_BANK)); 1152 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1153 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1154 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1155 NUM_BANKS(ADDR_SURF_16_BANK)); 1156 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1157 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1158 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1159 NUM_BANKS(ADDR_SURF_16_BANK)); 1160 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1161 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1162 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1163 NUM_BANKS(ADDR_SURF_16_BANK)); 1164 macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1165 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1166 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1167 NUM_BANKS(ADDR_SURF_16_BANK)); 1168 macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1169 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1170 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1171 NUM_BANKS(ADDR_SURF_8_BANK)); 1172 macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1173 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1174 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1175 NUM_BANKS(ADDR_SURF_4_BANK)); 1176 1177 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1178 WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 1179 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1180 if (reg_offset != 7) 1181 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1182 break; 1183 case CHIP_HAWAII: 1184 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1185 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1186 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1187 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1188 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1189 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1190 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1191 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1192 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1193 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1194 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1195 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1196 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1197 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1198 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1199 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1200 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1201 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1202 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1203 TILE_SPLIT(split_equal_to_row_size)); 1204 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1205 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1206 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1207 TILE_SPLIT(split_equal_to_row_size)); 1208 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1209 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1210 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1211 TILE_SPLIT(split_equal_to_row_size)); 1212 tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1213 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1214 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1215 TILE_SPLIT(split_equal_to_row_size)); 1216 tile[8] = 
(ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1217 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); 1218 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1219 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1220 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1221 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1222 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1223 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1224 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1225 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1226 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1227 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1228 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1229 tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | 1230 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1231 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1232 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1233 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1234 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1235 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1236 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1237 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1238 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1239 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1240 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1241 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1242 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1243 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1244 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1245 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1246 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1247 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1248 tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1249 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1250 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1251 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1252 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1253 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1254 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1255 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1256 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1257 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1258 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1259 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1260 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1261 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1262 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1263 tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1264 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1265 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1266 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1267 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1268 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1269 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1270 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1271 tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1272 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1273 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1274 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1275 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1276 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1277 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1278 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1279 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1280 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1281 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1282 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1283 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1284 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1285 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1286 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 
1287 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1288 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1289 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1290 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1291 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1292 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1293 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1294 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1295 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | 1296 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1297 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1298 tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1299 PIPE_CONFIG(ADDR_SURF_P4_16x16) | 1300 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1301 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1302 1303 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1304 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1305 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1306 NUM_BANKS(ADDR_SURF_16_BANK)); 1307 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1308 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1309 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1310 NUM_BANKS(ADDR_SURF_16_BANK)); 1311 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1312 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1313 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1314 NUM_BANKS(ADDR_SURF_16_BANK)); 1315 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1316 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1317 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1318 NUM_BANKS(ADDR_SURF_16_BANK)); 1319 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1320 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1321 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1322 NUM_BANKS(ADDR_SURF_8_BANK)); 1323 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1324 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1325 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1326 NUM_BANKS(ADDR_SURF_4_BANK)); 1327 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1328 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1329 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1330 NUM_BANKS(ADDR_SURF_4_BANK)); 1331 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1332 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1333 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1334 NUM_BANKS(ADDR_SURF_16_BANK)); 1335 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1336 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1337 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1338 NUM_BANKS(ADDR_SURF_16_BANK)); 1339 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1340 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1341 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1342 NUM_BANKS(ADDR_SURF_16_BANK)); 1343 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1344 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1345 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1346 NUM_BANKS(ADDR_SURF_8_BANK)); 1347 macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1348 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1349 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1350 NUM_BANKS(ADDR_SURF_16_BANK)); 1351 macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1352 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1353 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1354 NUM_BANKS(ADDR_SURF_8_BANK)); 1355 macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1356 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1357 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | 1358 NUM_BANKS(ADDR_SURF_4_BANK)); 1359 1360 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 1361 WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]); 
1362 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) 1363 if (reg_offset != 7) 1364 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]); 1365 break; 1366 case CHIP_KABINI: 1367 case CHIP_KAVERI: 1368 case CHIP_MULLINS: 1369 default: 1370 tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1371 PIPE_CONFIG(ADDR_SURF_P2) | 1372 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | 1373 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1374 tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1375 PIPE_CONFIG(ADDR_SURF_P2) | 1376 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | 1377 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1378 tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1379 PIPE_CONFIG(ADDR_SURF_P2) | 1380 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | 1381 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1382 tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1383 PIPE_CONFIG(ADDR_SURF_P2) | 1384 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | 1385 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1386 tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1387 PIPE_CONFIG(ADDR_SURF_P2) | 1388 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1389 TILE_SPLIT(split_equal_to_row_size)); 1390 tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1391 PIPE_CONFIG(ADDR_SURF_P2) | 1392 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); 1393 tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1394 PIPE_CONFIG(ADDR_SURF_P2) | 1395 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | 1396 TILE_SPLIT(split_equal_to_row_size)); 1397 tile[7] = (TILE_SPLIT(split_equal_to_row_size)); 1398 tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | 1399 PIPE_CONFIG(ADDR_SURF_P2)); 1400 tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1401 PIPE_CONFIG(ADDR_SURF_P2) | 1402 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); 1403 tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1404 PIPE_CONFIG(ADDR_SURF_P2) | 1405 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1406 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1407 tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1408 PIPE_CONFIG(ADDR_SURF_P2) | 1409 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | 1410 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1411 tile[12] = (TILE_SPLIT(split_equal_to_row_size)); 1412 tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1413 PIPE_CONFIG(ADDR_SURF_P2) | 1414 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); 1415 tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1416 PIPE_CONFIG(ADDR_SURF_P2) | 1417 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1418 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1419 tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | 1420 PIPE_CONFIG(ADDR_SURF_P2) | 1421 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1422 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1423 tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1424 PIPE_CONFIG(ADDR_SURF_P2) | 1425 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1426 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1427 tile[17] = (TILE_SPLIT(split_equal_to_row_size)); 1428 tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1429 PIPE_CONFIG(ADDR_SURF_P2) | 1430 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1431 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1432 tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | 1433 PIPE_CONFIG(ADDR_SURF_P2) | 1434 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING)); 1435 tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1436 PIPE_CONFIG(ADDR_SURF_P2) | 1437 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1438 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1439 tile[21] = 
(ARRAY_MODE(ARRAY_3D_TILED_THICK) | 1440 PIPE_CONFIG(ADDR_SURF_P2) | 1441 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1442 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1443 tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | 1444 PIPE_CONFIG(ADDR_SURF_P2) | 1445 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1446 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1447 tile[23] = (TILE_SPLIT(split_equal_to_row_size)); 1448 tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | 1449 PIPE_CONFIG(ADDR_SURF_P2) | 1450 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | 1451 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1452 tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | 1453 PIPE_CONFIG(ADDR_SURF_P2) | 1454 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1455 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1456 tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | 1457 PIPE_CONFIG(ADDR_SURF_P2) | 1458 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | 1459 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); 1460 tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | 1461 PIPE_CONFIG(ADDR_SURF_P2) | 1462 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); 1463 tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 1464 PIPE_CONFIG(ADDR_SURF_P2) | 1465 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1466 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); 1467 tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | 1468 PIPE_CONFIG(ADDR_SURF_P2) | 1469 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | 1470 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); 1471 tile[30] = (TILE_SPLIT(split_equal_to_row_size)); 1472 1473 macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1474 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1475 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1476 NUM_BANKS(ADDR_SURF_8_BANK)); 1477 macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1478 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1479 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1480 NUM_BANKS(ADDR_SURF_8_BANK)); 1481 macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1482 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 1483 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1484 NUM_BANKS(ADDR_SURF_8_BANK)); 1485 macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1486 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1487 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1488 NUM_BANKS(ADDR_SURF_8_BANK)); 1489 macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1490 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1491 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1492 NUM_BANKS(ADDR_SURF_8_BANK)); 1493 macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1494 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1495 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1496 NUM_BANKS(ADDR_SURF_8_BANK)); 1497 macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | 1498 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | 1499 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | 1500 NUM_BANKS(ADDR_SURF_8_BANK)); 1501 macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1502 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | 1503 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1504 NUM_BANKS(ADDR_SURF_16_BANK)); 1505 macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | 1506 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1507 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1508 NUM_BANKS(ADDR_SURF_16_BANK)); 1509 macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1510 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | 1511 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | 1512 NUM_BANKS(ADDR_SURF_16_BANK)); 1513 macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | 1514 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | 
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}

/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 * @instance: Certain registers are instanced per SE or SH.
 *            0xffffffff means broadcast to all SEs or SHs (CIK).
 * @xcc_id: xcc accelerated compute core id
 *
 * Select which SE, SH combinations to address.
 */
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance,
				  int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}

/**
 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
 *
 * @adev: amdgpu_device pointer
 *
 * Calculates the bitmask of enabled RBs (CIK).
 * Returns the enabled RB bitmask.
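 *
 * Added note (from the implementation below): the disable bits from
 * CC_RB_BACKEND_DISABLE and GC_USER_RB_BACKEND_DISABLE are ORed together,
 * masked to max_backends_per_se / max_sh_per_se bits and inverted, so a
 * set bit in the result means the corresponding RB is usable.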
 */
static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void
gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_HAWAII:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
			  SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_KAVERI:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}

static void
gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask =
rb0_mask << 1; 1697 1698 rb0_mask &= rb_mask; 1699 rb1_mask &= rb_mask; 1700 if (!rb0_mask || !rb1_mask) { 1701 raster_config_se &= ~RB_MAP_PKR0_MASK; 1702 1703 if (!rb0_mask) { 1704 raster_config_se |= 1705 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3); 1706 } else { 1707 raster_config_se |= 1708 RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0); 1709 } 1710 } 1711 1712 if (rb_per_se > 2) { 1713 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr); 1714 rb1_mask = rb0_mask << 1; 1715 rb0_mask &= rb_mask; 1716 rb1_mask &= rb_mask; 1717 if (!rb0_mask || !rb1_mask) { 1718 raster_config_se &= ~RB_MAP_PKR1_MASK; 1719 1720 if (!rb0_mask) { 1721 raster_config_se |= 1722 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3); 1723 } else { 1724 raster_config_se |= 1725 RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0); 1726 } 1727 } 1728 } 1729 } 1730 1731 /* GRBM_GFX_INDEX has a different offset on CI+ */ 1732 gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0); 1733 WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se); 1734 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); 1735 } 1736 1737 /* GRBM_GFX_INDEX has a different offset on CI+ */ 1738 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1739 } 1740 1741 /** 1742 * gfx_v7_0_setup_rb - setup the RBs on the asic 1743 * 1744 * @adev: amdgpu_device pointer 1745 * 1746 * Configures per-SE/SH RB registers (CIK). 1747 */ 1748 static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) 1749 { 1750 int i, j; 1751 u32 data; 1752 u32 raster_config = 0, raster_config_1 = 0; 1753 u32 active_rbs = 0; 1754 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / 1755 adev->gfx.config.max_sh_per_se; 1756 unsigned num_rb_pipes; 1757 1758 mutex_lock(&adev->grbm_idx_mutex); 1759 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1760 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1761 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 1762 data = gfx_v7_0_get_rb_active_bitmap(adev); 1763 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1764 rb_bitmap_width_per_sh); 1765 } 1766 } 1767 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1768 1769 adev->gfx.config.backend_enable_mask = active_rbs; 1770 adev->gfx.config.num_rbs = hweight32(active_rbs); 1771 1772 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * 1773 adev->gfx.config.max_shader_engines, 16); 1774 1775 gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1); 1776 1777 if (!adev->gfx.config.backend_enable_mask || 1778 adev->gfx.config.num_rbs >= num_rb_pipes) { 1779 WREG32(mmPA_SC_RASTER_CONFIG, raster_config); 1780 WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1); 1781 } else { 1782 gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1, 1783 adev->gfx.config.backend_enable_mask, 1784 num_rb_pipes); 1785 } 1786 1787 /* cache the values for userspace */ 1788 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1789 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1790 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 1791 adev->gfx.config.rb_config[i][j].rb_backend_disable = 1792 RREG32(mmCC_RB_BACKEND_DISABLE); 1793 adev->gfx.config.rb_config[i][j].user_rb_backend_disable = 1794 RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1795 adev->gfx.config.rb_config[i][j].raster_config = 1796 RREG32(mmPA_SC_RASTER_CONFIG); 1797 adev->gfx.config.rb_config[i][j].raster_config_1 = 1798 RREG32(mmPA_SC_RASTER_CONFIG_1); 1799 } 1800 } 1801 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1802 
mutex_unlock(&adev->grbm_idx_mutex); 1803 } 1804 1805 #define DEFAULT_SH_MEM_BASES (0x6000) 1806 /** 1807 * gfx_v7_0_init_compute_vmid - init the compute VMIDs 1808 * 1809 * @adev: amdgpu_device pointer 1810 * 1811 * Initialize compute vmid sh_mem registers 1812 * 1813 */ 1814 static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) 1815 { 1816 int i; 1817 uint32_t sh_mem_config; 1818 uint32_t sh_mem_bases; 1819 1820 /* 1821 * Configure apertures: 1822 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) 1823 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) 1824 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) 1825 */ 1826 sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); 1827 sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << 1828 SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; 1829 sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT; 1830 mutex_lock(&adev->srbm_mutex); 1831 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1832 cik_srbm_select(adev, 0, 0, 0, i); 1833 /* CP and shaders */ 1834 WREG32(mmSH_MEM_CONFIG, sh_mem_config); 1835 WREG32(mmSH_MEM_APE1_BASE, 1); 1836 WREG32(mmSH_MEM_APE1_LIMIT, 0); 1837 WREG32(mmSH_MEM_BASES, sh_mem_bases); 1838 } 1839 cik_srbm_select(adev, 0, 0, 0, 0); 1840 mutex_unlock(&adev->srbm_mutex); 1841 1842 /* Initialize all compute VMIDs to have no GDS, GWS, or OA 1843 access. These should be enabled by FW for target VMIDs. */ 1844 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) { 1845 WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); 1846 WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); 1847 WREG32(amdgpu_gds_reg_offset[i].gws, 0); 1848 WREG32(amdgpu_gds_reg_offset[i].oa, 0); 1849 } 1850 } 1851 1852 static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev) 1853 { 1854 int vmid; 1855 1856 /* 1857 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA 1858 * access. Compute VMIDs should be enabled by FW for target VMIDs, 1859 * the driver can enable them for graphics. VMID0 should maintain 1860 * access so that HWS firmware can save/restore entries. 1861 */ 1862 for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) { 1863 WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); 1864 WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); 1865 WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); 1866 WREG32(amdgpu_gds_reg_offset[vmid].oa, 0); 1867 } 1868 } 1869 1870 static void gfx_v7_0_config_init(struct amdgpu_device *adev) 1871 { 1872 adev->gfx.config.double_offchip_lds_buf = 1; 1873 } 1874 1875 /** 1876 * gfx_v7_0_constants_init - setup the 3D engine 1877 * 1878 * @adev: amdgpu_device pointer 1879 * 1880 * init the gfx constants such as the 3D engine, tiling configuration 1881 * registers, maximum number of quad pipes, render backends...
1882 */ 1883 static void gfx_v7_0_constants_init(struct amdgpu_device *adev) 1884 { 1885 u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base; 1886 u32 tmp; 1887 int i; 1888 1889 WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT)); 1890 1891 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 1892 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config); 1893 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config); 1894 1895 gfx_v7_0_tiling_mode_table_init(adev); 1896 1897 gfx_v7_0_setup_rb(adev); 1898 gfx_v7_0_get_cu_info(adev); 1899 gfx_v7_0_config_init(adev); 1900 1901 /* set HW defaults for 3D engine */ 1902 WREG32(mmCP_MEQ_THRESHOLDS, 1903 (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) | 1904 (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT)); 1905 1906 mutex_lock(&adev->grbm_idx_mutex); 1907 /* 1908 * making sure that the following register writes will be broadcasted 1909 * to all the shaders 1910 */ 1911 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 1912 1913 /* XXX SH_MEM regs */ 1914 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1915 sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, 1916 SH_MEM_ALIGNMENT_MODE_UNALIGNED); 1917 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE, 1918 MTYPE_NC); 1919 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE, 1920 MTYPE_UC); 1921 sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0); 1922 1923 sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG, 1924 SWIZZLE_ENABLE, 1); 1925 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, 1926 ELEMENT_SIZE, 1); 1927 sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG, 1928 INDEX_STRIDE, 3); 1929 WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg); 1930 1931 mutex_lock(&adev->srbm_mutex); 1932 for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) { 1933 if (i == 0) 1934 sh_mem_base = 0; 1935 else 1936 sh_mem_base = adev->gmc.shared_aperture_start >> 48; 1937 cik_srbm_select(adev, 0, 0, 0, i); 1938 /* CP and shaders */ 1939 WREG32(mmSH_MEM_CONFIG, sh_mem_cfg); 1940 WREG32(mmSH_MEM_APE1_BASE, 1); 1941 WREG32(mmSH_MEM_APE1_LIMIT, 0); 1942 WREG32(mmSH_MEM_BASES, sh_mem_base); 1943 } 1944 cik_srbm_select(adev, 0, 0, 0, 0); 1945 mutex_unlock(&adev->srbm_mutex); 1946 1947 gfx_v7_0_init_compute_vmid(adev); 1948 gfx_v7_0_init_gds_vmid(adev); 1949 1950 WREG32(mmSX_DEBUG_1, 0x20); 1951 1952 WREG32(mmTA_CNTL_AUX, 0x00010000); 1953 1954 tmp = RREG32(mmSPI_CONFIG_CNTL); 1955 tmp |= 0x03000000; 1956 WREG32(mmSPI_CONFIG_CNTL, tmp); 1957 1958 WREG32(mmSQ_CONFIG, 1); 1959 1960 WREG32(mmDB_DEBUG, 0); 1961 1962 tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff; 1963 tmp |= 0x00000400; 1964 WREG32(mmDB_DEBUG2, tmp); 1965 1966 tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c; 1967 tmp |= 0x00020200; 1968 WREG32(mmDB_DEBUG3, tmp); 1969 1970 tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000; 1971 tmp |= 0x00018208; 1972 WREG32(mmCB_HW_CONTROL, tmp); 1973 1974 WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT)); 1975 1976 WREG32(mmPA_SC_FIFO_SIZE, 1977 ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | 1978 (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | 1979 (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | 1980 (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT))); 1981 1982 
WREG32(mmVGT_NUM_INSTANCES, 1); 1983 1984 WREG32(mmCP_PERFMON_CNTL, 0); 1985 1986 WREG32(mmSQ_CONFIG, 0); 1987 1988 WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, 1989 ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) | 1990 (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT))); 1991 1992 WREG32(mmVGT_CACHE_INVALIDATION, 1993 (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) | 1994 (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT)); 1995 1996 WREG32(mmVGT_GS_VERTEX_REUSE, 16); 1997 WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0); 1998 1999 WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK | 2000 (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT)); 2001 WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK); 2002 2003 tmp = RREG32(mmSPI_ARB_PRIORITY); 2004 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); 2005 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); 2006 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); 2007 tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); 2008 WREG32(mmSPI_ARB_PRIORITY, tmp); 2009 2010 mutex_unlock(&adev->grbm_idx_mutex); 2011 2012 udelay(50); 2013 } 2014 2015 /** 2016 * gfx_v7_0_ring_test_ring - basic gfx ring test 2017 * 2018 * @ring: amdgpu_ring structure holding ring information 2019 * 2020 * Allocate a scratch register and write to it using the gfx ring (CIK). 2021 * Provides a basic gfx ring test to verify that the ring is working. 2022 * Used by gfx_v7_0_cp_gfx_resume(); 2023 * Returns 0 on success, error on failure. 2024 */ 2025 static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) 2026 { 2027 struct amdgpu_device *adev = ring->adev; 2028 uint32_t tmp = 0; 2029 unsigned i; 2030 int r; 2031 2032 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); 2033 r = amdgpu_ring_alloc(ring, 3); 2034 if (r) 2035 return r; 2036 2037 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 2038 amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START); 2039 amdgpu_ring_write(ring, 0xDEADBEEF); 2040 amdgpu_ring_commit(ring); 2041 2042 for (i = 0; i < adev->usec_timeout; i++) { 2043 tmp = RREG32(mmSCRATCH_REG0); 2044 if (tmp == 0xDEADBEEF) 2045 break; 2046 udelay(1); 2047 } 2048 if (i >= adev->usec_timeout) 2049 r = -ETIMEDOUT; 2050 return r; 2051 } 2052 2053 /** 2054 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp 2055 * 2056 * @ring: amdgpu_ring structure holding ring information 2057 * 2058 * Emits an hdp flush on the cp. 2059 */ 2060 static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) 2061 { 2062 u32 ref_and_mask; 2063 int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 
0 : 1; 2064 2065 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { 2066 switch (ring->me) { 2067 case 1: 2068 ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; 2069 break; 2070 case 2: 2071 ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe; 2072 break; 2073 default: 2074 return; 2075 } 2076 } else { 2077 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK; 2078 } 2079 2080 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 2081 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */ 2082 WAIT_REG_MEM_FUNCTION(3) | /* == */ 2083 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ 2084 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ); 2085 amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE); 2086 amdgpu_ring_write(ring, ref_and_mask); 2087 amdgpu_ring_write(ring, ref_and_mask); 2088 amdgpu_ring_write(ring, 0x20); /* poll interval */ 2089 } 2090 2091 static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) 2092 { 2093 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2094 amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) | 2095 EVENT_INDEX(4)); 2096 2097 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2098 amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) | 2099 EVENT_INDEX(0)); 2100 } 2101 2102 /** 2103 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring 2104 * 2105 * @ring: amdgpu_ring structure holding ring information 2106 * @addr: address 2107 * @seq: sequence number 2108 * @flags: fence related flags 2109 * 2110 * Emits a fence sequence number on the gfx ring and flushes 2111 * GPU caches. 2112 */ 2113 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 2114 u64 seq, unsigned flags) 2115 { 2116 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2117 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2118 /* Workaround for cache flush problems. First send a dummy EOP 2119 * event down the pipe with seq one below. 2120 */ 2121 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2122 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2123 EOP_TC_ACTION_EN | 2124 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2125 EVENT_INDEX(5))); 2126 amdgpu_ring_write(ring, addr & 0xfffffffc); 2127 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | 2128 DATA_SEL(1) | INT_SEL(0)); 2129 amdgpu_ring_write(ring, lower_32_bits(seq - 1)); 2130 amdgpu_ring_write(ring, upper_32_bits(seq - 1)); 2131 2132 /* Then send the real EOP event down the pipe. */ 2133 amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2134 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2135 EOP_TC_ACTION_EN | 2136 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2137 EVENT_INDEX(5))); 2138 amdgpu_ring_write(ring, addr & 0xfffffffc); 2139 amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | 2140 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 2141 amdgpu_ring_write(ring, lower_32_bits(seq)); 2142 amdgpu_ring_write(ring, upper_32_bits(seq)); 2143 } 2144 2145 /** 2146 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring 2147 * 2148 * @ring: amdgpu_ring structure holding ring information 2149 * @addr: address 2150 * @seq: sequence number 2151 * @flags: fence related flags 2152 * 2153 * Emits a fence sequence number on the compute ring and flushes 2154 * GPU caches. 
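 * Unlike the gfx ring, the compute fence is emitted with a RELEASE_MEM
 * packet rather than an EVENT_WRITE_EOP packet.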
2155 */ 2156 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, 2157 u64 addr, u64 seq, 2158 unsigned flags) 2159 { 2160 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; 2161 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; 2162 2163 /* RELEASE_MEM - flush caches, send int */ 2164 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5)); 2165 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | 2166 EOP_TC_ACTION_EN | 2167 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | 2168 EVENT_INDEX(5))); 2169 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 2170 amdgpu_ring_write(ring, addr & 0xfffffffc); 2171 amdgpu_ring_write(ring, upper_32_bits(addr)); 2172 amdgpu_ring_write(ring, lower_32_bits(seq)); 2173 amdgpu_ring_write(ring, upper_32_bits(seq)); 2174 } 2175 2176 /* 2177 * IB stuff 2178 */ 2179 /** 2180 * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring 2181 * 2182 * @ring: amdgpu_ring structure holding ring information 2183 * @job: job to retrieve vmid from 2184 * @ib: amdgpu indirect buffer object 2185 * @flags: options (AMDGPU_HAVE_CTX_SWITCH) 2186 * 2187 * Emits an DE (drawing engine) or CE (constant engine) IB 2188 * on the gfx ring. IBs are usually generated by userspace 2189 * acceleration drivers and submitted to the kernel for 2190 * scheduling on the ring. This function schedules the IB 2191 * on the gfx ring for execution by the GPU. 2192 */ 2193 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 2194 struct amdgpu_job *job, 2195 struct amdgpu_ib *ib, 2196 uint32_t flags) 2197 { 2198 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2199 u32 header, control = 0; 2200 2201 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2202 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 2203 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2204 amdgpu_ring_write(ring, 0); 2205 } 2206 2207 if (ib->flags & AMDGPU_IB_FLAG_CE) 2208 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); 2209 else 2210 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 2211 2212 control |= ib->length_dw | (vmid << 24); 2213 2214 amdgpu_ring_write(ring, header); 2215 amdgpu_ring_write(ring, 2216 #ifdef __BIG_ENDIAN 2217 (2 << 0) | 2218 #endif 2219 (ib->gpu_addr & 0xFFFFFFFC)); 2220 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 2221 amdgpu_ring_write(ring, control); 2222 } 2223 2224 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 2225 struct amdgpu_job *job, 2226 struct amdgpu_ib *ib, 2227 uint32_t flags) 2228 { 2229 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2230 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); 2231 2232 /* Currently, there is a high possibility to get wave ID mismatch 2233 * between ME and GDS, leading to a hw deadlock, because ME generates 2234 * different wave IDs than the GDS expects. This situation happens 2235 * randomly when at least 5 compute pipes use GDS ordered append. 2236 * The wave IDs generated by ME are also wrong after suspend/resume. 2237 * Those are probably bugs somewhere else in the kernel driver. 2238 * 2239 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and 2240 * GDS to 0 for this ring (me/pipe). 
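 * The reset packet below is only emitted when the IB explicitly requests
 * it via AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID.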
2241 */ 2242 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { 2243 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2244 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START); 2245 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); 2246 } 2247 2248 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2249 amdgpu_ring_write(ring, 2250 #ifdef __BIG_ENDIAN 2251 (2 << 0) | 2252 #endif 2253 (ib->gpu_addr & 0xFFFFFFFC)); 2254 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 2255 amdgpu_ring_write(ring, control); 2256 } 2257 2258 static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) 2259 { 2260 uint32_t dw2 = 0; 2261 2262 dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ 2263 if (flags & AMDGPU_HAVE_CTX_SWITCH) { 2264 gfx_v7_0_ring_emit_vgt_flush(ring); 2265 /* set load_global_config & load_global_uconfig */ 2266 dw2 |= 0x8001; 2267 /* set load_cs_sh_regs */ 2268 dw2 |= 0x01000000; 2269 /* set load_per_context_state & load_gfx_sh_regs */ 2270 dw2 |= 0x10002; 2271 } 2272 2273 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2274 amdgpu_ring_write(ring, dw2); 2275 amdgpu_ring_write(ring, 0); 2276 } 2277 2278 /** 2279 * gfx_v7_0_ring_test_ib - basic ring IB test 2280 * 2281 * @ring: amdgpu_ring structure holding ring information 2282 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT 2283 * 2284 * Allocate an IB and execute it on the gfx ring (CIK). 2285 * Provides a basic gfx ring test to verify that IBs are working. 2286 * Returns 0 on success, error on failure. 2287 */ 2288 static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) 2289 { 2290 struct amdgpu_device *adev = ring->adev; 2291 struct amdgpu_ib ib; 2292 struct dma_fence *f = NULL; 2293 uint32_t tmp = 0; 2294 long r; 2295 2296 WREG32(mmSCRATCH_REG0, 0xCAFEDEAD); 2297 memset(&ib, 0, sizeof(ib)); 2298 r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib); 2299 if (r) 2300 return r; 2301 2302 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 2303 ib.ptr[1] = mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START; 2304 ib.ptr[2] = 0xDEADBEEF; 2305 ib.length_dw = 3; 2306 2307 r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); 2308 if (r) 2309 goto error; 2310 2311 r = dma_fence_wait_timeout(f, false, timeout); 2312 if (r == 0) { 2313 r = -ETIMEDOUT; 2314 goto error; 2315 } else if (r < 0) { 2316 goto error; 2317 } 2318 tmp = RREG32(mmSCRATCH_REG0); 2319 if (tmp == 0xDEADBEEF) 2320 r = 0; 2321 else 2322 r = -EINVAL; 2323 2324 error: 2325 amdgpu_ib_free(adev, &ib, NULL); 2326 dma_fence_put(f); 2327 return r; 2328 } 2329 2330 /* 2331 * CP. 2332 * On CIK, gfx and compute now have independent command processors. 2333 * 2334 * GFX 2335 * Gfx consists of a single ring and can process both gfx jobs and 2336 * compute jobs. The gfx CP consists of three microengines (ME): 2337 * PFP - Pre-Fetch Parser 2338 * ME - Micro Engine 2339 * CE - Constant Engine 2340 * The PFP and ME make up what is considered the Drawing Engine (DE). 2341 * The CE is an asynchronous engine used for updating buffer descriptors 2342 * used by the DE so that they can be loaded into cache in parallel 2343 * while the DE is processing state update packets. 2344 * 2345 * Compute 2346 * The compute CP consists of two microengines (ME): 2347 * MEC1 - Compute MicroEngine 1 2348 * MEC2 - Compute MicroEngine 2 2349 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
2350 * The queues are exposed to userspace and are programmed directly 2351 * by the compute runtime. 2352 */ 2353 /** 2354 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs 2355 * 2356 * @adev: amdgpu_device pointer 2357 * @enable: enable or disable the MEs 2358 * 2359 * Halts or unhalts the gfx MEs. 2360 */ 2361 static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) 2362 { 2363 if (enable) 2364 WREG32(mmCP_ME_CNTL, 0); 2365 else 2366 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | 2367 CP_ME_CNTL__PFP_HALT_MASK | 2368 CP_ME_CNTL__CE_HALT_MASK)); 2369 udelay(50); 2370 } 2371 2372 /** 2373 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode 2374 * 2375 * @adev: amdgpu_device pointer 2376 * 2377 * Loads the gfx PFP, ME, and CE ucode. 2378 * Returns 0 for success, -EINVAL if the ucode is not available. 2379 */ 2380 static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev) 2381 { 2382 const struct gfx_firmware_header_v1_0 *pfp_hdr; 2383 const struct gfx_firmware_header_v1_0 *ce_hdr; 2384 const struct gfx_firmware_header_v1_0 *me_hdr; 2385 const __le32 *fw_data; 2386 unsigned i, fw_size; 2387 2388 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) 2389 return -EINVAL; 2390 2391 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; 2392 ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; 2393 me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; 2394 2395 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2396 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2397 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2398 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); 2399 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); 2400 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); 2401 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version); 2402 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version); 2403 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version); 2404 2405 gfx_v7_0_cp_gfx_enable(adev, false); 2406 2407 /* PFP */ 2408 fw_data = (const __le32 *) 2409 (adev->gfx.pfp_fw->data + 2410 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); 2411 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; 2412 WREG32(mmCP_PFP_UCODE_ADDR, 0); 2413 for (i = 0; i < fw_size; i++) 2414 WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); 2415 WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); 2416 2417 /* CE */ 2418 fw_data = (const __le32 *) 2419 (adev->gfx.ce_fw->data + 2420 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); 2421 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; 2422 WREG32(mmCP_CE_UCODE_ADDR, 0); 2423 for (i = 0; i < fw_size; i++) 2424 WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); 2425 WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); 2426 2427 /* ME */ 2428 fw_data = (const __le32 *) 2429 (adev->gfx.me_fw->data + 2430 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); 2431 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; 2432 WREG32(mmCP_ME_RAM_WADDR, 0); 2433 for (i = 0; i < fw_size; i++) 2434 WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); 2435 WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); 2436 2437 return 0; 2438 } 2439 2440 /** 2441 * gfx_v7_0_cp_gfx_start - start the gfx ring 2442 * 2443 * @adev: amdgpu_device pointer 2444 * 2445 * Enables the ring and loads the clear state context and other 2446 * 
packets required to init the ring. 2447 * Returns 0 for success, error for failure. 2448 */ 2449 static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev) 2450 { 2451 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; 2452 const struct cs_section_def *sect = NULL; 2453 const struct cs_extent_def *ext = NULL; 2454 int r, i; 2455 2456 /* init the CP */ 2457 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); 2458 WREG32(mmCP_ENDIAN_SWAP, 0); 2459 WREG32(mmCP_DEVICE_ID, 1); 2460 2461 gfx_v7_0_cp_gfx_enable(adev, true); 2462 2463 r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8); 2464 if (r) { 2465 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); 2466 return r; 2467 } 2468 2469 /* init the CE partitions. CE only used for gfx on CIK */ 2470 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 2471 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 2472 amdgpu_ring_write(ring, 0x8000); 2473 amdgpu_ring_write(ring, 0x8000); 2474 2475 /* clear state buffer */ 2476 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2477 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); 2478 2479 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); 2480 amdgpu_ring_write(ring, 0x80000000); 2481 amdgpu_ring_write(ring, 0x80000000); 2482 2483 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { 2484 for (ext = sect->section; ext->extent != NULL; ++ext) { 2485 if (sect->id == SECT_CONTEXT) { 2486 amdgpu_ring_write(ring, 2487 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); 2488 amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START); 2489 for (i = 0; i < ext->reg_count; i++) 2490 amdgpu_ring_write(ring, ext->extent[i]); 2491 } 2492 } 2493 } 2494 2495 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 2496 amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); 2497 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config); 2498 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1); 2499 2500 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2501 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); 2502 2503 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); 2504 amdgpu_ring_write(ring, 0); 2505 2506 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 2507 amdgpu_ring_write(ring, 0x00000316); 2508 amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2509 amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 2510 2511 amdgpu_ring_commit(ring); 2512 2513 return 0; 2514 } 2515 2516 /** 2517 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers 2518 * 2519 * @adev: amdgpu_device pointer 2520 * 2521 * Program the location and size of the gfx ring buffer 2522 * and test it to make sure it's working. 2523 * Returns 0 for success, error for failure. 
2524 */ 2525 static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) 2526 { 2527 struct amdgpu_ring *ring; 2528 u32 tmp; 2529 u32 rb_bufsz; 2530 u64 rb_addr, rptr_addr; 2531 int r; 2532 2533 WREG32(mmCP_SEM_WAIT_TIMER, 0x0); 2534 if (adev->asic_type != CHIP_HAWAII) 2535 WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0); 2536 2537 /* Set the write pointer delay */ 2538 WREG32(mmCP_RB_WPTR_DELAY, 0); 2539 2540 /* set the RB to use vmid 0 */ 2541 WREG32(mmCP_RB_VMID, 0); 2542 2543 WREG32(mmSCRATCH_ADDR, 0); 2544 2545 /* ring 0 - compute and gfx */ 2546 /* Set ring buffer size */ 2547 ring = &adev->gfx.gfx_ring[0]; 2548 rb_bufsz = order_base_2(ring->ring_size / 8); 2549 tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2550 #ifdef __BIG_ENDIAN 2551 tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT; 2552 #endif 2553 WREG32(mmCP_RB0_CNTL, tmp); 2554 2555 /* Initialize the ring buffer's read and write pointers */ 2556 WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK); 2557 ring->wptr = 0; 2558 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2559 2560 /* set the wb address whether it's enabled or not */ 2561 rptr_addr = ring->rptr_gpu_addr; 2562 WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); 2563 WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); 2564 2565 /* scratch register shadowing is no longer supported */ 2566 WREG32(mmSCRATCH_UMSK, 0); 2567 2568 mdelay(1); 2569 WREG32(mmCP_RB0_CNTL, tmp); 2570 2571 rb_addr = ring->gpu_addr >> 8; 2572 WREG32(mmCP_RB0_BASE, rb_addr); 2573 WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); 2574 2575 /* start the ring */ 2576 gfx_v7_0_cp_gfx_start(adev); 2577 r = amdgpu_ring_test_helper(ring); 2578 if (r) 2579 return r; 2580 2581 return 0; 2582 } 2583 2584 static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring) 2585 { 2586 return *ring->rptr_cpu_addr; 2587 } 2588 2589 static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) 2590 { 2591 struct amdgpu_device *adev = ring->adev; 2592 2593 return RREG32(mmCP_RB0_WPTR); 2594 } 2595 2596 static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) 2597 { 2598 struct amdgpu_device *adev = ring->adev; 2599 2600 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); 2601 (void)RREG32(mmCP_RB0_WPTR); 2602 } 2603 2604 static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring) 2605 { 2606 /* XXX check if swapping is necessary on BE */ 2607 return *ring->wptr_cpu_addr; 2608 } 2609 2610 static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring) 2611 { 2612 struct amdgpu_device *adev = ring->adev; 2613 2614 /* XXX check if swapping is necessary on BE */ 2615 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); 2616 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); 2617 } 2618 2619 /** 2620 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs 2621 * 2622 * @adev: amdgpu_device pointer 2623 * @enable: enable or disable the MEs 2624 * 2625 * Halts or unhalts the compute MEs. 2626 */ 2627 static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 2628 { 2629 if (enable) 2630 WREG32(mmCP_MEC_CNTL, 0); 2631 else 2632 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | 2633 CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2634 udelay(50); 2635 } 2636 2637 /** 2638 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode 2639 * 2640 * @adev: amdgpu_device pointer 2641 * 2642 * Loads the compute MEC1&2 ucode. 2643 * Returns 0 for success, -EINVAL if the ucode is not available.
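 * On Kaveri the MEC2 firmware is loaded in addition to MEC1; the other
 * CIK parts only use MEC1.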
2644 */ 2645 static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) 2646 { 2647 const struct gfx_firmware_header_v1_0 *mec_hdr; 2648 const __le32 *fw_data; 2649 unsigned i, fw_size; 2650 2651 if (!adev->gfx.mec_fw) 2652 return -EINVAL; 2653 2654 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2655 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2656 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 2657 adev->gfx.mec_feature_version = le32_to_cpu( 2658 mec_hdr->ucode_feature_version); 2659 2660 gfx_v7_0_cp_compute_enable(adev, false); 2661 2662 /* MEC1 */ 2663 fw_data = (const __le32 *) 2664 (adev->gfx.mec_fw->data + 2665 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); 2666 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; 2667 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); 2668 for (i = 0; i < fw_size; i++) 2669 WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++)); 2670 WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); 2671 2672 if (adev->asic_type == CHIP_KAVERI) { 2673 const struct gfx_firmware_header_v1_0 *mec2_hdr; 2674 2675 if (!adev->gfx.mec2_fw) 2676 return -EINVAL; 2677 2678 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2679 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2680 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 2681 adev->gfx.mec2_feature_version = le32_to_cpu( 2682 mec2_hdr->ucode_feature_version); 2683 2684 /* MEC2 */ 2685 fw_data = (const __le32 *) 2686 (adev->gfx.mec2_fw->data + 2687 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); 2688 fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; 2689 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); 2690 for (i = 0; i < fw_size; i++) 2691 WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++)); 2692 WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); 2693 } 2694 2695 return 0; 2696 } 2697 2698 /** 2699 * gfx_v7_0_cp_compute_fini - stop the compute queues 2700 * 2701 * @adev: amdgpu_device pointer 2702 * 2703 * Stop the compute queues and tear down the driver queue 2704 * info. 2705 */ 2706 static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) 2707 { 2708 int i; 2709 2710 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2711 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; 2712 2713 amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); 2714 } 2715 } 2716 2717 static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) 2718 { 2719 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); 2720 } 2721 2722 static int gfx_v7_0_mec_init(struct amdgpu_device *adev) 2723 { 2724 int r; 2725 u32 *hpd; 2726 size_t mec_hpd_size; 2727 2728 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); 2729 2730 /* take ownership of the relevant compute queues */ 2731 amdgpu_gfx_compute_queue_acquire(adev); 2732 2733 /* allocate space for ALL pipes (even the ones we don't own) */ 2734 mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec 2735 * GFX7_MEC_HPD_SIZE * 2; 2736 2737 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, 2738 AMDGPU_GEM_DOMAIN_VRAM | 2739 AMDGPU_GEM_DOMAIN_GTT, 2740 &adev->gfx.mec.hpd_eop_obj, 2741 &adev->gfx.mec.hpd_eop_gpu_addr, 2742 (void **)&hpd); 2743 if (r) { 2744 dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r); 2745 gfx_v7_0_mec_fini(adev); 2746 return r; 2747 } 2748 2749 /* clear memory. 
Not sure if this is required or not */ 2750 memset(hpd, 0, mec_hpd_size); 2751 2752 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); 2753 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); 2754 2755 return 0; 2756 } 2757 2758 static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev, 2759 int mec, int pipe) 2760 { 2761 u64 eop_gpu_addr; 2762 u32 tmp; 2763 size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe) 2764 * GFX7_MEC_HPD_SIZE * 2; 2765 2766 mutex_lock(&adev->srbm_mutex); 2767 eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset; 2768 2769 cik_srbm_select(adev, mec + 1, pipe, 0, 0); 2770 2771 /* write the EOP addr */ 2772 WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); 2773 WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); 2774 2775 /* set the VMID assigned */ 2776 WREG32(mmCP_HPD_EOP_VMID, 0); 2777 2778 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ 2779 tmp = RREG32(mmCP_HPD_EOP_CONTROL); 2780 tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK; 2781 tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8); 2782 WREG32(mmCP_HPD_EOP_CONTROL, tmp); 2783 2784 cik_srbm_select(adev, 0, 0, 0, 0); 2785 mutex_unlock(&adev->srbm_mutex); 2786 } 2787 2788 static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev) 2789 { 2790 int i; 2791 2792 /* disable the queue if it's active */ 2793 if (RREG32(mmCP_HQD_ACTIVE) & 1) { 2794 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); 2795 for (i = 0; i < adev->usec_timeout; i++) { 2796 if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) 2797 break; 2798 udelay(1); 2799 } 2800 2801 if (i == adev->usec_timeout) 2802 return -ETIMEDOUT; 2803 2804 WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0); 2805 WREG32(mmCP_HQD_PQ_RPTR, 0); 2806 WREG32(mmCP_HQD_PQ_WPTR, 0); 2807 } 2808 2809 return 0; 2810 } 2811 2812 static void gfx_v7_0_mqd_init(struct amdgpu_device *adev, 2813 struct cik_mqd *mqd, 2814 uint64_t mqd_gpu_addr, 2815 struct amdgpu_ring *ring) 2816 { 2817 u64 hqd_gpu_addr; 2818 u64 wb_gpu_addr; 2819 2820 /* init the mqd struct */ 2821 memset(mqd, 0, sizeof(struct cik_mqd)); 2822 2823 mqd->header = 0xC0310800; 2824 mqd->compute_static_thread_mgmt_se0 = 0xffffffff; 2825 mqd->compute_static_thread_mgmt_se1 = 0xffffffff; 2826 mqd->compute_static_thread_mgmt_se2 = 0xffffffff; 2827 mqd->compute_static_thread_mgmt_se3 = 0xffffffff; 2828 2829 /* enable doorbell? 
*/ 2830 mqd->cp_hqd_pq_doorbell_control = 2831 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 2832 if (ring->use_doorbell) 2833 mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2834 else 2835 mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2836 2837 /* set the pointer to the MQD */ 2838 mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc; 2839 mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); 2840 2841 /* set MQD vmid to 0 */ 2842 mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL); 2843 mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK; 2844 2845 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ 2846 hqd_gpu_addr = ring->gpu_addr >> 8; 2847 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; 2848 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); 2849 2850 /* set up the HQD, this is similar to CP_RB0_CNTL */ 2851 mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL); 2852 mqd->cp_hqd_pq_control &= 2853 ~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK | 2854 CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK); 2855 2856 mqd->cp_hqd_pq_control |= 2857 order_base_2(ring->ring_size / 8); 2858 mqd->cp_hqd_pq_control |= 2859 (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8); 2860 #ifdef __BIG_ENDIAN 2861 mqd->cp_hqd_pq_control |= 2862 2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT; 2863 #endif 2864 mqd->cp_hqd_pq_control &= 2865 ~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK | 2866 CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK | 2867 CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK); 2868 mqd->cp_hqd_pq_control |= 2869 CP_HQD_PQ_CONTROL__PRIV_STATE_MASK | 2870 CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */ 2871 2872 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ 2873 wb_gpu_addr = ring->wptr_gpu_addr; 2874 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; 2875 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; 2876 2877 /* set the wb address whether it's enabled or not */ 2878 wb_gpu_addr = ring->rptr_gpu_addr; 2879 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; 2880 mqd->cp_hqd_pq_rptr_report_addr_hi = 2881 upper_32_bits(wb_gpu_addr) & 0xffff; 2882 2883 /* enable the doorbell if requested */ 2884 if (ring->use_doorbell) { 2885 mqd->cp_hqd_pq_doorbell_control = 2886 RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 2887 mqd->cp_hqd_pq_doorbell_control &= 2888 ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK; 2889 mqd->cp_hqd_pq_doorbell_control |= 2890 (ring->doorbell_index << 2891 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT); 2892 mqd->cp_hqd_pq_doorbell_control |= 2893 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK; 2894 mqd->cp_hqd_pq_doorbell_control &= 2895 ~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK | 2896 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK); 2897 2898 } else { 2899 mqd->cp_hqd_pq_doorbell_control = 0; 2900 } 2901 2902 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */ 2903 ring->wptr = 0; 2904 mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr); 2905 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); 2906 2907 /* set the vmid for the queue */ 2908 mqd->cp_hqd_vmid = 0; 2909 2910 /* defaults */ 2911 mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL); 2912 mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR); 2913 mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI); 2914 mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR); 2915 mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE); 2916 mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD); 2917
mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE); 2918 mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO); 2919 mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI); 2920 mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO); 2921 mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI); 2922 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); 2923 mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); 2924 mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY); 2925 mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY); 2926 mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR); 2927 2928 /* activate the queue */ 2929 mqd->cp_hqd_active = 1; 2930 } 2931 2932 static int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd) 2933 { 2934 uint32_t tmp; 2935 uint32_t mqd_reg; 2936 uint32_t *mqd_data; 2937 2938 /* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */ 2939 mqd_data = &mqd->cp_mqd_base_addr_lo; 2940 2941 /* disable wptr polling */ 2942 tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); 2943 tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); 2944 WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); 2945 2946 /* program all HQD registers */ 2947 for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++) 2948 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]); 2949 2950 /* activate the HQD */ 2951 for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++) 2952 WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]); 2953 2954 return 0; 2955 } 2956 2957 static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) 2958 { 2959 int r; 2960 u64 mqd_gpu_addr; 2961 struct cik_mqd *mqd; 2962 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; 2963 2964 r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, 2965 AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, 2966 &mqd_gpu_addr, (void **)&mqd); 2967 if (r) { 2968 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 2969 return r; 2970 } 2971 2972 mutex_lock(&adev->srbm_mutex); 2973 cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); 2974 2975 gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring); 2976 gfx_v7_0_mqd_deactivate(adev); 2977 gfx_v7_0_mqd_commit(adev, mqd); 2978 2979 cik_srbm_select(adev, 0, 0, 0, 0); 2980 mutex_unlock(&adev->srbm_mutex); 2981 2982 amdgpu_bo_kunmap(ring->mqd_obj); 2983 amdgpu_bo_unreserve(ring->mqd_obj); 2984 return 0; 2985 } 2986 2987 /** 2988 * gfx_v7_0_cp_compute_resume - setup the compute queue registers 2989 * 2990 * @adev: amdgpu_device pointer 2991 * 2992 * Program the compute queues and test them to make sure they 2993 * are working. 2994 * Returns 0 for success, error for failure. 
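 * All MEC pipes are initialized, even those whose queues the driver does
 * not own.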
2995 */ 2996 static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) 2997 { 2998 int r, i, j; 2999 u32 tmp; 3000 struct amdgpu_ring *ring; 3001 3002 /* fix up chicken bits */ 3003 tmp = RREG32(mmCP_CPF_DEBUG); 3004 tmp |= (1 << 23); 3005 WREG32(mmCP_CPF_DEBUG, tmp); 3006 3007 /* init all pipes (even the ones we don't own) */ 3008 for (i = 0; i < adev->gfx.mec.num_mec; i++) 3009 for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) 3010 gfx_v7_0_compute_pipe_init(adev, i, j); 3011 3012 /* init the queues */ 3013 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3014 r = gfx_v7_0_compute_queue_init(adev, i); 3015 if (r) { 3016 gfx_v7_0_cp_compute_fini(adev); 3017 return r; 3018 } 3019 } 3020 3021 gfx_v7_0_cp_compute_enable(adev, true); 3022 3023 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3024 ring = &adev->gfx.compute_ring[i]; 3025 amdgpu_ring_test_helper(ring); 3026 } 3027 3028 return 0; 3029 } 3030 3031 static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable) 3032 { 3033 gfx_v7_0_cp_gfx_enable(adev, enable); 3034 gfx_v7_0_cp_compute_enable(adev, enable); 3035 } 3036 3037 static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev) 3038 { 3039 int r; 3040 3041 r = gfx_v7_0_cp_gfx_load_microcode(adev); 3042 if (r) 3043 return r; 3044 r = gfx_v7_0_cp_compute_load_microcode(adev); 3045 if (r) 3046 return r; 3047 3048 return 0; 3049 } 3050 3051 static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 3052 bool enable) 3053 { 3054 u32 tmp = RREG32(mmCP_INT_CNTL_RING0); 3055 3056 if (enable) 3057 tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | 3058 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); 3059 else 3060 tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK | 3061 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK); 3062 WREG32(mmCP_INT_CNTL_RING0, tmp); 3063 } 3064 3065 static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) 3066 { 3067 int r; 3068 3069 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3070 3071 r = gfx_v7_0_cp_load_microcode(adev); 3072 if (r) 3073 return r; 3074 3075 r = gfx_v7_0_cp_gfx_resume(adev); 3076 if (r) 3077 return r; 3078 r = gfx_v7_0_cp_compute_resume(adev); 3079 if (r) 3080 return r; 3081 3082 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3083 3084 return 0; 3085 } 3086 3087 /** 3088 * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP 3089 * 3090 * @ring: the ring to emit the commands to 3091 * 3092 * Sync the command pipeline with the PFP. E.g. wait for everything 3093 * to be completed. 
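 * This is implemented as a WAIT_REG_MEM on the ring's own fence address,
 * so the packet stream stalls until the last emitted fence has signalled.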
3094 */ 3095 static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) 3096 { 3097 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3098 uint32_t seq = ring->fence_drv.sync_seq; 3099 uint64_t addr = ring->fence_drv.gpu_addr; 3100 3101 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 3102 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 3103 WAIT_REG_MEM_FUNCTION(3) | /* equal */ 3104 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */ 3105 amdgpu_ring_write(ring, addr & 0xfffffffc); 3106 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 3107 amdgpu_ring_write(ring, seq); 3108 amdgpu_ring_write(ring, 0xffffffff); 3109 amdgpu_ring_write(ring, 4); /* poll interval */ 3110 3111 if (usepfp) { 3112 /* sync CE with ME to prevent CE fetch CEIB before context switch done */ 3113 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3114 amdgpu_ring_write(ring, 0); 3115 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3116 amdgpu_ring_write(ring, 0); 3117 } 3118 } 3119 3120 /* 3121 * vm 3122 * VMID 0 is the physical GPU address space as used by the kernel. 3123 * VMIDs 1-15 are used for userspace clients and are handled 3124 * by the amdgpu vm/hsa code. 3125 */ 3126 /** 3127 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP 3128 * 3129 * @ring: amdgpu_ring pointer 3130 * @vmid: vmid number to use 3131 * @pd_addr: address 3132 * 3133 * Update the page table base and flush the VM TLB 3134 * using the CP (CIK). 3135 */ 3136 static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, 3137 unsigned vmid, uint64_t pd_addr) 3138 { 3139 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3140 3141 amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 3142 3143 /* wait for the invalidate to complete */ 3144 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 3145 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ 3146 WAIT_REG_MEM_FUNCTION(0) | /* always */ 3147 WAIT_REG_MEM_ENGINE(0))); /* me */ 3148 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); 3149 amdgpu_ring_write(ring, 0); 3150 amdgpu_ring_write(ring, 0); /* ref */ 3151 amdgpu_ring_write(ring, 0); /* mask */ 3152 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3153 3154 /* compute doesn't have PFP */ 3155 if (usepfp) { 3156 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 3157 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 3158 amdgpu_ring_write(ring, 0x0); 3159 3160 /* sync CE with ME to prevent CE fetch CEIB before context switch done */ 3161 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3162 amdgpu_ring_write(ring, 0); 3163 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3164 amdgpu_ring_write(ring, 0); 3165 } 3166 } 3167 3168 static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, 3169 uint32_t reg, uint32_t val) 3170 { 3171 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); 3172 3173 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3174 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 3175 WRITE_DATA_DST_SEL(0))); 3176 amdgpu_ring_write(ring, reg); 3177 amdgpu_ring_write(ring, 0); 3178 amdgpu_ring_write(ring, val); 3179 } 3180 3181 /* 3182 * RLC 3183 * The RLC is a multi-purpose microengine that handles a 3184 * variety of functions.
3185 */ 3186 static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) 3187 { 3188 const u32 *src_ptr; 3189 u32 dws; 3190 const struct cs_section_def *cs_data; 3191 int r; 3192 3193 /* allocate rlc buffers */ 3194 if (adev->flags & AMD_IS_APU) { 3195 if (adev->asic_type == CHIP_KAVERI) { 3196 adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list; 3197 adev->gfx.rlc.reg_list_size = 3198 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list); 3199 } else { 3200 adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list; 3201 adev->gfx.rlc.reg_list_size = 3202 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list); 3203 } 3204 } 3205 adev->gfx.rlc.cs_data = ci_cs_data; 3206 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ 3207 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ 3208 3209 src_ptr = adev->gfx.rlc.reg_list; 3210 dws = adev->gfx.rlc.reg_list_size; 3211 dws += (5 * 16) + 48 + 48 + 64; 3212 3213 cs_data = adev->gfx.rlc.cs_data; 3214 3215 if (src_ptr) { 3216 /* init save restore block */ 3217 r = amdgpu_gfx_rlc_init_sr(adev, dws); 3218 if (r) 3219 return r; 3220 } 3221 3222 if (cs_data) { 3223 /* init clear state block */ 3224 r = amdgpu_gfx_rlc_init_csb(adev); 3225 if (r) 3226 return r; 3227 } 3228 3229 if (adev->gfx.rlc.cp_table_size) { 3230 r = amdgpu_gfx_rlc_init_cpt(adev); 3231 if (r) 3232 return r; 3233 } 3234 3235 /* init spm vmid with 0xf */ 3236 if (adev->gfx.rlc.funcs->update_spm_vmid) 3237 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf); 3238 3239 return 0; 3240 } 3241 3242 static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable) 3243 { 3244 u32 tmp; 3245 3246 tmp = RREG32(mmRLC_LB_CNTL); 3247 if (enable) 3248 tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; 3249 else 3250 tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK; 3251 WREG32(mmRLC_LB_CNTL, tmp); 3252 } 3253 3254 static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) 3255 { 3256 u32 i, j, k; 3257 u32 mask; 3258 3259 mutex_lock(&adev->grbm_idx_mutex); 3260 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3261 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 3262 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0); 3263 for (k = 0; k < adev->usec_timeout; k++) { 3264 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) 3265 break; 3266 udelay(1); 3267 } 3268 } 3269 } 3270 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3271 mutex_unlock(&adev->grbm_idx_mutex); 3272 3273 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 3274 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | 3275 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | 3276 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; 3277 for (k = 0; k < adev->usec_timeout; k++) { 3278 if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) 3279 break; 3280 udelay(1); 3281 } 3282 } 3283 3284 static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc) 3285 { 3286 u32 tmp; 3287 3288 tmp = RREG32(mmRLC_CNTL); 3289 if (tmp != rlc) 3290 WREG32(mmRLC_CNTL, rlc); 3291 } 3292 3293 static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) 3294 { 3295 u32 data, orig; 3296 3297 orig = data = RREG32(mmRLC_CNTL); 3298 3299 if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) { 3300 u32 i; 3301 3302 data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK; 3303 WREG32(mmRLC_CNTL, data); 3304 3305 for (i = 0; i < adev->usec_timeout; i++) { 3306 if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0) 3307 break; 3308 udelay(1); 3309 } 3310 3311 
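/* also make sure the RLC serdes masters have gone idle before returning */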
gfx_v7_0_wait_for_rlc_serdes(adev); 3312 } 3313 3314 return orig; 3315 } 3316 3317 static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) 3318 { 3319 return true; 3320 } 3321 3322 static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id) 3323 { 3324 u32 tmp, i, mask; 3325 3326 tmp = 0x1 | (1 << 1); 3327 WREG32(mmRLC_GPR_REG2, tmp); 3328 3329 mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK | 3330 RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK; 3331 for (i = 0; i < adev->usec_timeout; i++) { 3332 if ((RREG32(mmRLC_GPM_STAT) & mask) == mask) 3333 break; 3334 udelay(1); 3335 } 3336 3337 for (i = 0; i < adev->usec_timeout; i++) { 3338 if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0) 3339 break; 3340 udelay(1); 3341 } 3342 } 3343 3344 static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id) 3345 { 3346 u32 tmp; 3347 3348 tmp = 0x1 | (0 << 1); 3349 WREG32(mmRLC_GPR_REG2, tmp); 3350 } 3351 3352 /** 3353 * gfx_v7_0_rlc_stop - stop the RLC ME 3354 * 3355 * @adev: amdgpu_device pointer 3356 * 3357 * Halt the RLC ME (MicroEngine) (CIK). 3358 */ 3359 static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) 3360 { 3361 WREG32(mmRLC_CNTL, 0); 3362 3363 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3364 3365 gfx_v7_0_wait_for_rlc_serdes(adev); 3366 } 3367 3368 /** 3369 * gfx_v7_0_rlc_start - start the RLC ME 3370 * 3371 * @adev: amdgpu_device pointer 3372 * 3373 * Unhalt the RLC ME (MicroEngine) (CIK). 3374 */ 3375 static void gfx_v7_0_rlc_start(struct amdgpu_device *adev) 3376 { 3377 WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK); 3378 3379 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3380 3381 udelay(50); 3382 } 3383 3384 static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev) 3385 { 3386 u32 tmp = RREG32(mmGRBM_SOFT_RESET); 3387 3388 tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 3389 WREG32(mmGRBM_SOFT_RESET, tmp); 3390 udelay(50); 3391 tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK; 3392 WREG32(mmGRBM_SOFT_RESET, tmp); 3393 udelay(50); 3394 } 3395 3396 /** 3397 * gfx_v7_0_rlc_resume - setup the RLC hw 3398 * 3399 * @adev: amdgpu_device pointer 3400 * 3401 * Initialize the RLC registers, load the ucode, 3402 * and start the RLC (CIK). 3403 * Returns 0 for success, -EINVAL if the ucode is not available. 
3404 */ 3405 static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) 3406 { 3407 const struct rlc_firmware_header_v1_0 *hdr; 3408 const __le32 *fw_data; 3409 unsigned i, fw_size; 3410 u32 tmp; 3411 3412 if (!adev->gfx.rlc_fw) 3413 return -EINVAL; 3414 3415 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 3416 amdgpu_ucode_print_rlc_hdr(&hdr->header); 3417 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 3418 adev->gfx.rlc_feature_version = le32_to_cpu( 3419 hdr->ucode_feature_version); 3420 3421 adev->gfx.rlc.funcs->stop(adev); 3422 3423 /* disable CG */ 3424 tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; 3425 WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); 3426 3427 adev->gfx.rlc.funcs->reset(adev); 3428 3429 gfx_v7_0_init_pg(adev); 3430 3431 WREG32(mmRLC_LB_CNTR_INIT, 0); 3432 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); 3433 3434 mutex_lock(&adev->grbm_idx_mutex); 3435 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3436 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); 3437 WREG32(mmRLC_LB_PARAMS, 0x00600408); 3438 WREG32(mmRLC_LB_CNTL, 0x80000004); 3439 mutex_unlock(&adev->grbm_idx_mutex); 3440 3441 WREG32(mmRLC_MC_CNTL, 0); 3442 WREG32(mmRLC_UCODE_CNTL, 0); 3443 3444 fw_data = (const __le32 *) 3445 (adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 3446 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 3447 WREG32(mmRLC_GPM_UCODE_ADDR, 0); 3448 for (i = 0; i < fw_size; i++) 3449 WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); 3450 WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); 3451 3452 /* XXX - find out what chips support lbpw */ 3453 gfx_v7_0_enable_lbpw(adev, false); 3454 3455 if (adev->asic_type == CHIP_BONAIRE) 3456 WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); 3457 3458 adev->gfx.rlc.funcs->start(adev); 3459 3460 return 0; 3461 } 3462 3463 static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid) 3464 { 3465 u32 data; 3466 3467 amdgpu_gfx_off_ctrl(adev, false); 3468 3469 data = RREG32(mmRLC_SPM_VMID); 3470 3471 data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK; 3472 data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT; 3473 3474 WREG32(mmRLC_SPM_VMID, data); 3475 3476 amdgpu_gfx_off_ctrl(adev, true); 3477 } 3478 3479 static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) 3480 { 3481 u32 data, orig, tmp, tmp2; 3482 3483 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); 3484 3485 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { 3486 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3487 3488 tmp = gfx_v7_0_halt_rlc(adev); 3489 3490 mutex_lock(&adev->grbm_idx_mutex); 3491 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); 3492 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3493 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3494 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | 3495 RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK | 3496 RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK; 3497 WREG32(mmRLC_SERDES_WR_CTRL, tmp2); 3498 mutex_unlock(&adev->grbm_idx_mutex); 3499 3500 gfx_v7_0_update_rlc(adev, tmp); 3501 3502 data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; 3503 if (orig != data) 3504 WREG32(mmRLC_CGCG_CGLS_CTRL, data); 3505 3506 } else { 3507 gfx_v7_0_enable_gui_idle_interrupt(adev, false); 3508 3509 RREG32(mmCB_CGTT_SCLK_CTRL); 3510 RREG32(mmCB_CGTT_SCLK_CTRL); 3511 RREG32(mmCB_CGTT_SCLK_CTRL); 3512 RREG32(mmCB_CGTT_SCLK_CTRL); 3513 3514 data &= 
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

	} else {
		gfx_v7_0_enable_gui_idle_interrupt(adev, false);

		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (orig != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
	}
}

static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (orig != data)
					WREG32(mmCP_MEM_SLP_CNTL, data);
			}
		}

		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000001;
		data &= 0xfffffffd;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (orig != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
	} else {
		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);
	}
}
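/*
 * Clockgating must be toggled in a fixed order: MGCG before CGCG when
 * enabling, CGCG before MGCG when disabling, with the GUI idle interrupt
 * held off while the switch is in progress.
 */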
static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (enable) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
}

static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
		data &= ~0x2000;
	else
		data |= 0x2000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KAVERI)
		return 5;
	else
		return 4;
}

static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 data, orig;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);
	} else {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);

		data = RREG32(mmDB_RENDER_CONTROL);
	}
}

static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
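/*
 * Return a bitmap of the CUs that are actually active in the currently
 * selected SE/SH: CUs marked inactive either by fuses
 * (CC_GC_SHADER_ARRAY_CONFIG) or by the user override
 * (GC_USER_SHADER_ARRAY_CONFIG) are masked out.
 */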
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
	u32 tmp;

	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);

	tmp = RREG32(mmRLC_MAX_PG_CU);
	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
	WREG32(mmRLC_MAX_PG_CU, tmp);
}

static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
					    bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D

static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
	u32 data, orig;
	u32 i;

	if (adev->gfx.rlc.cs_data) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
	} else {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		for (i = 0; i < 3; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
	}
	if (adev->gfx.rlc.reg_list) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
	}

	orig = data = RREG32(mmRLC_PG_CNTL);
	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);

	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);

	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);

	data = 0x10101010;
	WREG32(mmRLC_PG_DELAY, data);

	data = RREG32(mmRLC_PG_DELAY_2);
	data &= ~0xff;
	data |= 0x3;
	WREG32(mmRLC_PG_DELAY_2, data);

	data = RREG32(mmRLC_AUTO_PG_CTRL);
	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
	WREG32(mmRLC_AUTO_PG_CTRL, data);
}

static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_enable_gfx_cgpg(adev, enable);
	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
}
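/*
 * Compute the size, in dwords, of the clear state buffer that
 * gfx_v7_0_get_csb_buffer() emits: the begin/end preamble packets,
 * context control, every SECT_CONTEXT extent from the golden clear
 * state, the PA_SC_RASTER_CONFIG pair and the trailing CLEAR_STATE
 * packet.
 */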
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return 0;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		buffer[count++] = cpu_to_le32(0x16000012);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KAVERI:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_HAWAII:
		buffer[count++] = cpu_to_le32(0x3a00161a);
		buffer[count++] = cpu_to_le32(0x0000002e);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
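/*
 * Enable the powergating features advertised in adev->pg_flags: SMU sclk
 * slowdown on power up/down and, when full GFX PG is supported, the CGPG
 * init sequence plus CP and GDS powergating, followed by the always-on
 * CU mask and the per-CU powergating enables.
 */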
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_init_gfx_cgpg(adev);
			gfx_v7_0_enable_cp_pg(adev, true);
			gfx_v7_0_enable_gds_pg(adev, true);
		}
		gfx_v7_0_init_ao_cu_mask(adev);
		gfx_v7_0_update_gfx_pg(adev, true);
	}
}

static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, false);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, false);
			gfx_v7_0_enable_gds_pg(adev, false);
		}
	}
}

/**
 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}

static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}

static void gfx_v7_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}
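/*
 * Wave debug state is read through the SQ indirect register interface:
 * SQ_IND_INDEX selects the SIMD, wave and register (with optional
 * auto-increment for bulk reads) and SQ_IND_DATA returns the value.
 */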
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (regno << SQ_IND_INDEX__INDEX__SHIFT) |
	       (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK) |
	       (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32(mmSQ_IND_DATA);
}

static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	cik_srbm_select(adev, me, pipe, q, vm);
}

static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v7_0_select_se_sh,
	.read_wave_data = &gfx_v7_0_read_wave_data,
	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};
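/* RLC callbacks wired into the common amdgpu RLC helper layer */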
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
	.set_safe_mode = gfx_v7_0_set_safe_mode,
	.unset_safe_mode = gfx_v7_0_unset_safe_mode,
	.init = gfx_v7_0_rlc_init,
	.get_csb_size = gfx_v7_0_get_csb_size,
	.get_csb_buffer = gfx_v7_0_get_csb_buffer,
	.get_cp_table_num = gfx_v7_0_cp_pg_table_num,
	.resume = gfx_v7_0_rlc_resume,
	.stop = gfx_v7_0_rlc_stop,
	.reset = gfx_v7_0_rlc_reset,
	.start = gfx_v7_0_rlc_start,
	.update_spm_vmid = gfx_v7_0_update_spm_vmid
};

static int gfx_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.xcc_mask = 1;
	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
	gfx_v7_0_set_ring_funcs(adev);
	gfx_v7_0_set_irq_funcs(adev);
	gfx_v7_0_set_gds_init(adev);

	return 0;
}

static int gfx_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}

static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 7;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAWAII:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 11;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
						   MC_ARB_RAMCFG, NOOFBANK);
	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
						   MC_ARB_RAMCFG, NOOFRANKS);

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM is installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
	/* XXX use MC settings? */
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 2:
		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 4:
		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;
}
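/*
 * Set up one compute ring: map it onto its (mec, pipe, queue) triple,
 * assign a doorbell and name, and register it with the common ring and
 * EOP interrupt infrastructure.
 */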
static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	return 0;
}

static int gfx_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, j, k, r, ring_id;

	switch (adev->asic_type) {
	case CHIP_KAVERI:
		adev->gfx.mec.num_mec = 2;
		break;
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	r = gfx_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate mec buffers */
	r = gfx_v7_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v7_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v7_0_gpu_early_init(adev);

	return r;
}

static int gfx_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	gfx_v7_0_cp_compute_fini(adev);
	amdgpu_gfx_rlc_fini(adev);
	gfx_v7_0_mec_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);
	if (adev->gfx.rlc.cp_table_size) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v7_0_free_microcode(adev);

	return 0;
}

static int gfx_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v7_0_constants_init(adev);

	/* init CSB */
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
	/* init rlc */
	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_resume(adev);
	if (r)
		return r;

	return r;
}

static int gfx_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	gfx_v7_0_cp_enable(adev, false);
	adev->gfx.rlc.funcs->stop(adev);
	gfx_v7_0_fini_pg(adev);

	return 0;
}

static int gfx_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_fini(adev);
}

static int gfx_v7_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_init(adev);
}

static bool gfx_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
		return false;
	else
		return true;
}

static int gfx_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
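/*
 * Check the GRBM/SRBM status registers for hung GFX, CP and RLC blocks
 * and, if anything is still busy, halt CP/MEC parsing, disable CG/PG and
 * pulse the corresponding GRBM/SRBM soft reset bits.
 */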
static int gfx_v7_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

	if (grbm_soft_reset || srbm_soft_reset) {
		/* disable CG/PG */
		gfx_v7_0_fini_pg(adev);
		gfx_v7_0_update_cg(adev, false);

		/* stop the rlc */
		adev->gfx.rlc.funcs->stop(adev);

		/* Disable GFX parsing/prefetching */
		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

		/* Disable MEC parsing/prefetching */
		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

		if (grbm_soft_reset) {
			tmp = RREG32(mmGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);
		}

		if (srbm_soft_reset) {
			tmp = RREG32(mmSRBM_SOFT_RESET);
			tmp |= srbm_soft_reset;
			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~srbm_soft_reset;
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);
		}
		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}

static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}
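/*
 * Route an EOP interrupt enable/disable request either to the GFX ring
 * or to the matching MEC pipe.
 */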
static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id;
	struct amdgpu_ring *ring;
	int i;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if ((ring->me == me_id) && (ring->pipe == pipe_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static void gfx_v7_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	struct amdgpu_ring *ring;
	u8 me_id, pipe_id;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if ((ring->me == me_id) && (ring->pipe == pipe_id))
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}

static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v7_0_fault(adev, entry);
	return 0;
}

static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	// XXX soft reset the gfx block only
	gfx_v7_0_fault(adev, entry);
	return 0;
}

static int gfx_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (gate) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static int gfx_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}

static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);	/* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}

static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);	/* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}

static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.name = "gfx_v7_0",
	.early_init = gfx_v7_0_early_init,
	.late_init = gfx_v7_0_late_init,
	.sw_init = gfx_v7_0_sw_init,
	.sw_fini = gfx_v7_0_sw_fini,
	.hw_init = gfx_v7_0_hw_init,
	.hw_fini = gfx_v7_0_hw_fini,
	.suspend = gfx_v7_0_suspend,
	.resume = gfx_v7_0_resume,
	.is_idle = gfx_v7_0_is_idle,
	.wait_for_idle = gfx_v7_0_wait_for_idle,
	.soft_reset = gfx_v7_0_soft_reset,
	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
	.set_powergating_state = gfx_v7_0_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush */
		5, /* SURFACE_SYNC */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
	.soft_recovery = gfx_v7_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
};

static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
		7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
		7, /* gfx_v7_0_emit_mem_sync_compute */
	.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
	.emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};

static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
	.set = gfx_v7_0_set_eop_interrupt_state,
	.process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
	.set = gfx_v7_0_set_priv_reg_fault_state,
	.process = gfx_v7_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
	.set = gfx_v7_0_set_priv_inst_fault_state,
	.process = gfx_v7_0_priv_inst_irq,
};

static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}
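/*
 * Query the GDS size from the hardware and record the fixed GWS/OA sizes
 * and the maximum GDS compute wave id.
 */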
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}

static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	memset(cu_info, 0, sizeof(*cu_info));

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 4 && j < 2)
				gfx_v7_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[0][i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}

const struct amdgpu_ip_block_version gfx_v7_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 2,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 3,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};