/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}
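/*
 * fw_domains_get() further below chains the helpers in this file into the
 * usual per-domain handshake, roughly:
 *
 *	fw_domain_wait_ack_clear(d);	wait for the previous ack to drop
 *	fw_domain_get(d);		write val_set into reg_set
 *	fw_domain_wait_ack(d);		wait for the hardware to ack the wake
 *
 * and fw_domains_put() undoes it with fw_domain_put() plus a posting read
 * from reg_post where one is configured.
 */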
static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* something from the same cacheline, but not the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do for all, just do for first found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}
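/*
 * Register writes that need forcewake on gen6/gen7 also have to respect the
 * GT FIFO: __gen6_gt_wait_for_fifo() below waits until more than
 * GT_FIFO_NUM_RESERVED_ENTRIES entries are free (re-reading GTFIFOCTL on
 * vlv, where the FIFO is shared with hardware) and charges one entry per
 * write, while gen6_gt_check_fifodbg() above reports and clears any error
 * latched in GTFIFODBG.
 */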
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
			GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;
	unsigned long irqflags;

	assert_device_not_suspended(domain->i915);

	spin_lock_irqsave(&domain->i915->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
}

void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up.
		 */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. Subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
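/*
 * A typical get/put sequence, as described above (sketch only; the register
 * accesses in the middle are whatever the caller needs to run without the GT
 * powering down in between):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... register sequence that must execute with forcewake held ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 */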
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into.
	 */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	if (i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed register detected. Please use i915.mmio_debug=1 to debug this problem.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
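/*
 * The read macros below differ only in how they map a register offset to
 * forcewake domains: gen6 grabs FORCEWAKE_RENDER for anything below 0x40000
 * (other than FORCEWAKE itself), vlv/chv consult the
 * FORCEWAKE_*_RANGE_OFFSET() tables above to pick render, media or both, and
 * gen9 additionally falls back to the blitter domain. The reference taken in
 * __force_wake_get() is dropped later by the domain timer, not at the end of
 * the read.
 */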
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER
#define GEN6_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define GEN6_WRITE_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
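/* As with gen8_shadowed_regs above, writes to the gen9 registers listed
 * below skip the forcewake handling in __gen9_write().
 */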
static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)


static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}
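/*
 * Note the split in fw_domain_init() above: gen6 uses plain
 * 0/FORCEWAKE_KERNEL values for its single FORCEWAKE register, while the
 * multi-threaded forcewake registers on later generations take masked
 * writes (_MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE), and vlv and gen6-8 also
 * get a separate reg_post for the posting read done after clearing a domain.
 */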
static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the BIOS hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}
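/*
 * i915_reg_read_ioctl() below lets userspace sample the whitelisted
 * registers. A rough userspace sketch (libdrm-style; the offset shown is
 * RING_TIMESTAMP(RENDER_RING_BASE), assumed here to be 0x2358):
 *
 *	struct drm_i915_reg_read rd = { .offset = 0x2358 };
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rd);
 *	// on success, rd.val holds the 64-bit render ring timestamp
 */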
#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		MISSING_CASE(entry->size);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D)
		   | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}