/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)


static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
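
/*
 * Usage sketch (illustrative only, not a call site in this file): a sequence
 * that must keep the GT awake across several dependent register accesses
 * brackets them with the reference-counted helpers above, e.g.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... a series of I915_READ()/I915_WRITE() accesses ...
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * Isolated accesses do not need this: i915_read##x() below takes and releases
 * forcewake around the access itself whenever NEEDS_FORCE_WAKE() is true and
 * no reference is already held.
 */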

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) &&            \
	 ((reg) != FORCEWAKE))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into.
	 */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define __i915_read(x) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}

__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
	unsigned long irqflags; \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (dev_priv->info->gen == 5) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	case 2: return i8xx_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}