/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
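/*
 * Forcewake overview: on GEN6+ the GT can power down with RC6, so before
 * touching GT registers the driver raises a wake request in a FORCEWAKE
 * register and polls the matching ACK register, bounded by
 * FORCEWAKE_ACK_TIMEOUT_MS. The per-platform helpers below all follow the
 * same three-step shape (illustrative sketch of the gen6 single-bit
 * protocol, not literal code):
 *
 *	wait for ACK bit to clear;		(previous request retired)
 *	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
 *	wait for ACK bit to be set;		(GT is awake)
 */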
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
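/*
 * GT FIFO note: __gen6_gt_wait_for_fifo() above guards the write FIFO
 * between the CPU and the GT. The free-entry count is read from GTFIFOCTL,
 * cached in uncore.fifo_count, and only re-read when the cache drops below
 * GT_FIFO_NUM_RESERVED_ENTRIES (always re-read on VLV, where hardware
 * shares the FIFO with software). The poll loop retries up to 500 times
 * with udelay(10), i.e. roughly 5ms, before warning and giving up.
 */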
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
	if (!IS_CHERRYVIEW(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}
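/*
 * The wrappers below add per-engine reference counting on top of the raw
 * handshakes above, so the hardware is only touched on the 0->1 and 1->0
 * transitions. Illustrative pattern for a hypothetical caller that only
 * needs the render well:
 *
 *	vlv_force_wake_get(dev_priv, FORCEWAKE_RENDER);
 *	... MMIO in a render-wakeable range ...
 *	vlv_force_wake_put(dev_priv, FORCEWAKE_RENDER);
 */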
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
}

static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_RENDER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Media to ack.\n");
	}

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_BLITTER_GEN9) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
	}
}

static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Blitter Engine */
	if (FORCEWAKE_BLITTER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}
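/*
 * Gen9 splits forcewake into three independently acked domains (render,
 * media, blitter), each with its own request/ack register pair; the
 * refcounting wrappers below mirror the VLV ones, with an extra
 * fw_blittercount for the third domain.
 */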
static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		if (dev_priv->uncore.fw_rendercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		if (dev_priv->uncore.fw_mediacount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		if (dev_priv->uncore.fw_blittercount++ == 0)
			dev_priv->uncore.funcs.force_wake_get(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (FORCEWAKE_RENDER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_rendercount == 0);
		if (--dev_priv->uncore.fw_rendercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_RENDER);
	}

	if (FORCEWAKE_MEDIA & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_mediacount == 0);
		if (--dev_priv->uncore.fw_mediacount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_MEDIA);
	}

	if (FORCEWAKE_BLITTER & fw_engine) {
		WARN_ON(dev_priv->uncore.fw_blittercount == 0);
		if (--dev_priv->uncore.fw_blittercount == 0)
			dev_priv->uncore.funcs.force_wake_put(dev_priv,
							      FORCEWAKE_BLITTER);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	intel_runtime_pm_put(dev_priv);
}
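/*
 * Deferred release: gen6_gt_force_wake_put() (further down) doesn't drop
 * the last reference synchronously. Instead it re-arms a pinned one-jiffy
 * timer that runs gen6_force_wake_timer() above, so bursts of register
 * traffic don't bounce the wake handshake on every get/put pair.
 */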
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}
/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	unsigned long irqflags;
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}
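/*
 * Illustrative use of the pair above (hypothetical caller, not code from
 * this file): a sequence that must not race with RC6 entry is bracketed as
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... several dependent register accesses ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 *
 * Isolated accesses don't need this; the mmio vfuncs below take and drop
 * forcewake on demand.
 */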
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xC000, 0xC800) || \
	 REG_RANGE((reg), 0xF000, 0x10000) || \
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
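/*
 * Worked example for the range tables above: on VLV, an access to 0x12080
 * matches FORCEWAKE_VLV_MEDIA_RANGE_OFFSET (0x12000-0x14000) and thus
 * takes the media well, while 0x2030 (RING_TAIL of the render ring, base
 * 0x2000) falls in the 0x2000-0x4000 render range. Anything at or above
 * 0x40000 never needs forcewake.
 */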
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine |= FORCEWAKE_RENDER; \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine |= FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}
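/*
 * Naming note: each __*_read(x) macro above expands to a function such as
 * gen6_read32() or vlv_read8(). The 8/16/32/64-bit variants are stamped
 * out together below and wired into dev_priv->uncore.funcs by
 * intel_uncore_init(), which is how the I915_READ() family reaches the
 * right per-platform path.
 */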
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	} \
	REG_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER
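/*
 * Write-side note: on gen6/7 a write below 0x40000 doesn't take forcewake;
 * __gen6_write()/__hsw_write() below instead reserve a slot in the GT FIFO
 * via __gen6_gt_wait_for_fifo() and check GTFIFODBG afterwards if the wait
 * failed. Forcewake-taking writes only appear in the gen8+ variants.
 */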
#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}
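/*
 * Shadowed registers (assumption worth noting: the rationale below is the
 * usual one for these lists, not spelled out in this file): the gen8
 * entries above are registers the hardware shadows itself, so writes to
 * them don't need the wake well up; gen8_write##x() and chv_write##x()
 * below therefore skip the forcewake dance for them. is_gen8_shadowed()
 * is a linear scan, cheap for a seven-entry table on the write path.
 */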
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	REG_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	unsigned fwengine = 0; \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
	      bool trace) { \
	REG_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      fwengine); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      fwengine); \
	} \
	REG_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
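/*
 * The two ASSIGN_*_MMIO_VFUNCS macros are the glue used by
 * intel_uncore_init() below; e.g. ASSIGN_WRITE_MMIO_VFUNCS(chv) expands to
 * four assignments of the form
 *
 *	dev_priv->uncore.funcs.mmio_writel = chv_write32;
 *
 * one per access width.
 */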
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	__intel_uncore_early_sanitize(dev, false);

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		WARN_ON(1);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
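/*
 * The whitelist below gates the reg-read ioctl: an entry matches on exact
 * offset plus a generation bitmask, e.g. GEN_RANGE(4, 9) expands to
 * GENMASK(9, 4) == 0x3f0, covering gen4 through gen9 for the single
 * render ring timestamp entry currently exposed.
 */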
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
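/*
 * The reset helpers below all drive some flavour of GDRST: pre-ILK parts
 * expose it as the I915_GDRST byte in PCI config space (i915_do_reset()
 * asserts it for at least 20us, then polls completion for up to 500ms),
 * ILK uses the MCHBAR-mirrored GDSR register, and gen6+ the GEN6_GDRST
 * MMIO register. intel_gpu_reset() at the end picks the right variant.
 */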
static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}