/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"

#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20161121"
#define DRIVER_TIMESTAMP	1479717903

#undef WARN_ON
/* Many gcc versions seem not to see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem.
 * This will either WARN() or DRM_ERROR() depending on the
 * verbose_state_checks module parameter, to enable distros and users to
 * tailor their preferred amount of i915 abrt spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
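 *
 * For example, an array such as
 *
 *	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
 *
 * (see struct skl_ddb_allocation further down) is indexed by plane, and on
 * a platform with fewer sprites the entries between the topmost sprite and
 * PLANE_CURSOR are simply left unused.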
187 */ 188 enum plane { 189 PLANE_A = 0, 190 PLANE_B, 191 PLANE_C, 192 PLANE_CURSOR, 193 I915_MAX_PLANES, 194 }; 195 #define plane_name(p) ((p) + 'A') 196 197 #define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') 198 199 enum port { 200 PORT_NONE = -1, 201 PORT_A = 0, 202 PORT_B, 203 PORT_C, 204 PORT_D, 205 PORT_E, 206 I915_MAX_PORTS 207 }; 208 #define port_name(p) ((p) + 'A') 209 210 #define I915_NUM_PHYS_VLV 2 211 212 enum dpio_channel { 213 DPIO_CH0, 214 DPIO_CH1 215 }; 216 217 enum dpio_phy { 218 DPIO_PHY0, 219 DPIO_PHY1 220 }; 221 222 enum intel_display_power_domain { 223 POWER_DOMAIN_PIPE_A, 224 POWER_DOMAIN_PIPE_B, 225 POWER_DOMAIN_PIPE_C, 226 POWER_DOMAIN_PIPE_A_PANEL_FITTER, 227 POWER_DOMAIN_PIPE_B_PANEL_FITTER, 228 POWER_DOMAIN_PIPE_C_PANEL_FITTER, 229 POWER_DOMAIN_TRANSCODER_A, 230 POWER_DOMAIN_TRANSCODER_B, 231 POWER_DOMAIN_TRANSCODER_C, 232 POWER_DOMAIN_TRANSCODER_EDP, 233 POWER_DOMAIN_TRANSCODER_DSI_A, 234 POWER_DOMAIN_TRANSCODER_DSI_C, 235 POWER_DOMAIN_PORT_DDI_A_LANES, 236 POWER_DOMAIN_PORT_DDI_B_LANES, 237 POWER_DOMAIN_PORT_DDI_C_LANES, 238 POWER_DOMAIN_PORT_DDI_D_LANES, 239 POWER_DOMAIN_PORT_DDI_E_LANES, 240 POWER_DOMAIN_PORT_DSI, 241 POWER_DOMAIN_PORT_CRT, 242 POWER_DOMAIN_PORT_OTHER, 243 POWER_DOMAIN_VGA, 244 POWER_DOMAIN_AUDIO, 245 POWER_DOMAIN_PLLS, 246 POWER_DOMAIN_AUX_A, 247 POWER_DOMAIN_AUX_B, 248 POWER_DOMAIN_AUX_C, 249 POWER_DOMAIN_AUX_D, 250 POWER_DOMAIN_GMBUS, 251 POWER_DOMAIN_MODESET, 252 POWER_DOMAIN_INIT, 253 254 POWER_DOMAIN_NUM, 255 }; 256 257 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 258 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 259 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 260 #define POWER_DOMAIN_TRANSCODER(tran) \ 261 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 262 (tran) + POWER_DOMAIN_TRANSCODER_A) 263 264 enum hpd_pin { 265 HPD_NONE = 0, 266 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 267 HPD_CRT, 268 HPD_SDVO_B, 269 HPD_SDVO_C, 270 HPD_PORT_A, 271 HPD_PORT_B, 272 HPD_PORT_C, 273 HPD_PORT_D, 274 HPD_PORT_E, 275 HPD_NUM_PINS 276 }; 277 278 #define for_each_hpd_pin(__pin) \ 279 for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) 280 281 struct i915_hotplug { 282 struct work_struct hotplug_work; 283 284 struct { 285 unsigned long last_jiffies; 286 int count; 287 enum { 288 HPD_ENABLED = 0, 289 HPD_DISABLED = 1, 290 HPD_MARK_DISABLED = 2 291 } state; 292 } stats[HPD_NUM_PINS]; 293 u32 event_bits; 294 struct delayed_work reenable_work; 295 296 struct intel_digital_port *irq_port[I915_MAX_PORTS]; 297 u32 long_port_mask; 298 u32 short_port_mask; 299 struct work_struct dig_port_work; 300 301 struct work_struct poll_init_work; 302 bool poll_enabled; 303 304 /* 305 * if we get a HPD irq from DP and a HPD irq from non-DP 306 * the non-DP HPD could block the workqueue on a mode config 307 * mutex getting, that userspace may have taken. However 308 * userspace is waiting on the DP workqueue to run which is 309 * blocked behind the non-DP one. 
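 *
 * DP hotplug processing therefore runs on this dedicated workqueue,
 * roughly:
 *
 *	queue_work(dev_priv->hotplug.dp_wq, &some_dp_work);
 *
 * (illustrative only; the actual work item depends on the caller)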
310 */ 311 struct workqueue_struct *dp_wq; 312 }; 313 314 #define I915_GEM_GPU_DOMAINS \ 315 (I915_GEM_DOMAIN_RENDER | \ 316 I915_GEM_DOMAIN_SAMPLER | \ 317 I915_GEM_DOMAIN_COMMAND | \ 318 I915_GEM_DOMAIN_INSTRUCTION | \ 319 I915_GEM_DOMAIN_VERTEX) 320 321 #define for_each_pipe(__dev_priv, __p) \ 322 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) 323 #define for_each_pipe_masked(__dev_priv, __p, __mask) \ 324 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ 325 for_each_if ((__mask) & (1 << (__p))) 326 #define for_each_universal_plane(__dev_priv, __pipe, __p) \ 327 for ((__p) = 0; \ 328 (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ 329 (__p)++) 330 #define for_each_sprite(__dev_priv, __p, __s) \ 331 for ((__s) = 0; \ 332 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ 333 (__s)++) 334 335 #define for_each_port_masked(__port, __ports_mask) \ 336 for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ 337 for_each_if ((__ports_mask) & (1 << (__port))) 338 339 #define for_each_crtc(dev, crtc) \ 340 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) 341 342 #define for_each_intel_plane(dev, intel_plane) \ 343 list_for_each_entry(intel_plane, \ 344 &(dev)->mode_config.plane_list, \ 345 base.head) 346 347 #define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ 348 list_for_each_entry(intel_plane, \ 349 &(dev)->mode_config.plane_list, \ 350 base.head) \ 351 for_each_if ((plane_mask) & \ 352 (1 << drm_plane_index(&intel_plane->base))) 353 354 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 355 list_for_each_entry(intel_plane, \ 356 &(dev)->mode_config.plane_list, \ 357 base.head) \ 358 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 359 360 #define for_each_intel_crtc(dev, intel_crtc) \ 361 list_for_each_entry(intel_crtc, \ 362 &(dev)->mode_config.crtc_list, \ 363 base.head) 364 365 #define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ 366 list_for_each_entry(intel_crtc, \ 367 &(dev)->mode_config.crtc_list, \ 368 base.head) \ 369 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) 370 371 #define for_each_intel_encoder(dev, intel_encoder) \ 372 list_for_each_entry(intel_encoder, \ 373 &(dev)->mode_config.encoder_list, \ 374 base.head) 375 376 #define for_each_intel_connector(dev, intel_connector) \ 377 list_for_each_entry(intel_connector, \ 378 &(dev)->mode_config.connector_list, \ 379 base.head) 380 381 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 382 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 383 for_each_if ((intel_encoder)->base.crtc == (__crtc)) 384 385 #define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ 386 list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ 387 for_each_if ((intel_connector)->base.encoder == (__encoder)) 388 389 #define for_each_power_domain(domain, mask) \ 390 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 391 for_each_if ((1 << (domain)) & (mask)) 392 393 struct drm_i915_private; 394 struct i915_mm_struct; 395 struct i915_mmu_object; 396 397 struct drm_i915_file_private { 398 struct drm_i915_private *dev_priv; 399 struct drm_file *file; 400 401 struct { 402 spinlock_t lock; 403 struct list_head request_list; 404 /* 20ms is a fairly arbitrary limit (greater than the average frame time) 405 * chosen to prevent the CPU getting more than a frame ahead of the GPU 406 * (when using lax throttling for 
the frontbuffer). We also use it to 407 * offer free GPU waitboosts for severely congested workloads. 408 */ 409 #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) 410 } mm; 411 struct idr context_idr; 412 413 struct intel_rps_client { 414 struct list_head link; 415 unsigned boosts; 416 } rps; 417 418 unsigned int bsd_engine; 419 }; 420 421 /* Used by dp and fdi links */ 422 struct intel_link_m_n { 423 uint32_t tu; 424 uint32_t gmch_m; 425 uint32_t gmch_n; 426 uint32_t link_m; 427 uint32_t link_n; 428 }; 429 430 void intel_link_compute_m_n(int bpp, int nlanes, 431 int pixel_clock, int link_clock, 432 struct intel_link_m_n *m_n); 433 434 /* Interface history: 435 * 436 * 1.1: Original. 437 * 1.2: Add Power Management 438 * 1.3: Add vblank support 439 * 1.4: Fix cmdbuffer path, add heap destroy 440 * 1.5: Add vblank pipe configuration 441 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 442 * - Support vertical blank on secondary display pipe 443 */ 444 #define DRIVER_MAJOR 1 445 #define DRIVER_MINOR 6 446 #define DRIVER_PATCHLEVEL 0 447 448 struct opregion_header; 449 struct opregion_acpi; 450 struct opregion_swsci; 451 struct opregion_asle; 452 453 struct intel_opregion { 454 struct opregion_header *header; 455 struct opregion_acpi *acpi; 456 struct opregion_swsci *swsci; 457 u32 swsci_gbda_sub_functions; 458 u32 swsci_sbcb_sub_functions; 459 struct opregion_asle *asle; 460 void *rvda; 461 const void *vbt; 462 u32 vbt_size; 463 u32 *lid_state; 464 struct work_struct asle_work; 465 }; 466 #define OPREGION_SIZE (8*1024) 467 468 struct intel_overlay; 469 struct intel_overlay_error_state; 470 471 struct sdvo_device_mapping { 472 u8 initialized; 473 u8 dvo_port; 474 u8 slave_addr; 475 u8 dvo_wiring; 476 u8 i2c_pin; 477 u8 ddc_pin; 478 }; 479 480 struct intel_connector; 481 struct intel_encoder; 482 struct intel_atomic_state; 483 struct intel_crtc_state; 484 struct intel_initial_plane_config; 485 struct intel_crtc; 486 struct intel_limit; 487 struct dpll; 488 489 struct drm_i915_display_funcs { 490 int (*get_display_clock_speed)(struct drm_i915_private *dev_priv); 491 int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane); 492 int (*compute_pipe_wm)(struct intel_crtc_state *cstate); 493 int (*compute_intermediate_wm)(struct drm_device *dev, 494 struct intel_crtc *intel_crtc, 495 struct intel_crtc_state *newstate); 496 void (*initial_watermarks)(struct intel_atomic_state *state, 497 struct intel_crtc_state *cstate); 498 void (*atomic_update_watermarks)(struct intel_atomic_state *state, 499 struct intel_crtc_state *cstate); 500 void (*optimize_watermarks)(struct intel_atomic_state *state, 501 struct intel_crtc_state *cstate); 502 int (*compute_global_watermarks)(struct drm_atomic_state *state); 503 void (*update_wm)(struct intel_crtc *crtc); 504 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 505 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 506 /* Returns the active state of the crtc, and if the crtc is active, 507 * fills out the pipe-config with the hw state. 
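 * A caller typically does something like:
 *
 *	struct intel_crtc_state conf = { };
 *	bool active = dev_priv->display.get_pipe_config(crtc, &conf);
 *
 * (sketch only; real callers also hold the required modeset locks)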
*/ 508 bool (*get_pipe_config)(struct intel_crtc *, 509 struct intel_crtc_state *); 510 void (*get_initial_plane_config)(struct intel_crtc *, 511 struct intel_initial_plane_config *); 512 int (*crtc_compute_clock)(struct intel_crtc *crtc, 513 struct intel_crtc_state *crtc_state); 514 void (*crtc_enable)(struct intel_crtc_state *pipe_config, 515 struct drm_atomic_state *old_state); 516 void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, 517 struct drm_atomic_state *old_state); 518 void (*update_crtcs)(struct drm_atomic_state *state, 519 unsigned int *crtc_vblank_mask); 520 void (*audio_codec_enable)(struct drm_connector *connector, 521 struct intel_encoder *encoder, 522 const struct drm_display_mode *adjusted_mode); 523 void (*audio_codec_disable)(struct intel_encoder *encoder); 524 void (*fdi_link_train)(struct drm_crtc *crtc); 525 void (*init_clock_gating)(struct drm_i915_private *dev_priv); 526 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 527 struct drm_framebuffer *fb, 528 struct drm_i915_gem_object *obj, 529 struct drm_i915_gem_request *req, 530 uint32_t flags); 531 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); 532 /* clock updates for mode set */ 533 /* cursor updates */ 534 /* render clock increase/decrease */ 535 /* display clock increase/decrease */ 536 /* pll clock increase/decrease */ 537 538 void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); 539 void (*load_luts)(struct drm_crtc_state *crtc_state); 540 }; 541 542 enum forcewake_domain_id { 543 FW_DOMAIN_ID_RENDER = 0, 544 FW_DOMAIN_ID_BLITTER, 545 FW_DOMAIN_ID_MEDIA, 546 547 FW_DOMAIN_ID_COUNT 548 }; 549 550 enum forcewake_domains { 551 FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER), 552 FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER), 553 FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA), 554 FORCEWAKE_ALL = (FORCEWAKE_RENDER | 555 FORCEWAKE_BLITTER | 556 FORCEWAKE_MEDIA) 557 }; 558 559 #define FW_REG_READ (1) 560 #define FW_REG_WRITE (2) 561 562 enum decoupled_power_domain { 563 GEN9_DECOUPLED_PD_BLITTER = 0, 564 GEN9_DECOUPLED_PD_RENDER, 565 GEN9_DECOUPLED_PD_MEDIA, 566 GEN9_DECOUPLED_PD_ALL 567 }; 568 569 enum decoupled_ops { 570 GEN9_DECOUPLED_OP_WRITE = 0, 571 GEN9_DECOUPLED_OP_READ 572 }; 573 574 enum forcewake_domains 575 intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, 576 i915_reg_t reg, unsigned int op); 577 578 struct intel_uncore_funcs { 579 void (*force_wake_get)(struct drm_i915_private *dev_priv, 580 enum forcewake_domains domains); 581 void (*force_wake_put)(struct drm_i915_private *dev_priv, 582 enum forcewake_domains domains); 583 584 uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 585 uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 586 uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 587 uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace); 588 589 void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r, 590 uint8_t val, bool trace); 591 void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r, 592 uint16_t val, bool trace); 593 void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r, 594 uint32_t val, bool trace); 595 }; 596 597 struct intel_forcewake_range { 598 u32 start; 599 u32 end; 600 601 enum forcewake_domains domains; 602 }; 603 604 struct intel_uncore { 605 spinlock_t lock; /** lock is also taken in irq contexts. 
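 * Register accessors that can run from hard irq context therefore use the
 * irqsave variants, e.g.:
 *
 *	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 *	...
 *	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);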
*/ 606 607 const struct intel_forcewake_range *fw_domains_table; 608 unsigned int fw_domains_table_entries; 609 610 struct intel_uncore_funcs funcs; 611 612 unsigned fifo_count; 613 614 enum forcewake_domains fw_domains; 615 enum forcewake_domains fw_domains_active; 616 617 struct intel_uncore_forcewake_domain { 618 struct drm_i915_private *i915; 619 enum forcewake_domain_id id; 620 enum forcewake_domains mask; 621 unsigned wake_count; 622 struct hrtimer timer; 623 i915_reg_t reg_set; 624 u32 val_set; 625 u32 val_clear; 626 i915_reg_t reg_ack; 627 i915_reg_t reg_post; 628 u32 val_reset; 629 } fw_domain[FW_DOMAIN_ID_COUNT]; 630 631 int unclaimed_mmio_check; 632 }; 633 634 /* Iterate over initialised fw domains */ 635 #define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \ 636 for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ 637 (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \ 638 (domain__)++) \ 639 for_each_if ((mask__) & (domain__)->mask) 640 641 #define for_each_fw_domain(domain__, dev_priv__) \ 642 for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__) 643 644 #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) 645 #define CSR_VERSION_MAJOR(version) ((version) >> 16) 646 #define CSR_VERSION_MINOR(version) ((version) & 0xffff) 647 648 struct intel_csr { 649 struct work_struct work; 650 const char *fw_path; 651 uint32_t *dmc_payload; 652 uint32_t dmc_fw_size; 653 uint32_t version; 654 uint32_t mmio_count; 655 i915_reg_t mmioaddr[8]; 656 uint32_t mmiodata[8]; 657 uint32_t dc_state; 658 uint32_t allowed_dc_mask; 659 }; 660 661 #define DEV_INFO_FOR_EACH_FLAG(func) \ 662 /* Keep is_* in chronological order */ \ 663 func(is_mobile); \ 664 func(is_i85x); \ 665 func(is_i915g); \ 666 func(is_i945gm); \ 667 func(is_g33); \ 668 func(is_g4x); \ 669 func(is_pineview); \ 670 func(is_broadwater); \ 671 func(is_crestline); \ 672 func(is_ivybridge); \ 673 func(is_valleyview); \ 674 func(is_cherryview); \ 675 func(is_haswell); \ 676 func(is_broadwell); \ 677 func(is_skylake); \ 678 func(is_broxton); \ 679 func(is_kabylake); \ 680 func(is_alpha_support); \ 681 /* Keep has_* in alphabetical order */ \ 682 func(has_64bit_reloc); \ 683 func(has_csr); \ 684 func(has_ddi); \ 685 func(has_dp_mst); \ 686 func(has_fbc); \ 687 func(has_fpga_dbg); \ 688 func(has_gmbus_irq); \ 689 func(has_gmch_display); \ 690 func(has_guc); \ 691 func(has_hotplug); \ 692 func(has_hw_contexts); \ 693 func(has_l3_dpf); \ 694 func(has_llc); \ 695 func(has_logical_ring_contexts); \ 696 func(has_overlay); \ 697 func(has_pipe_cxsr); \ 698 func(has_pooled_eu); \ 699 func(has_psr); \ 700 func(has_rc6); \ 701 func(has_rc6p); \ 702 func(has_resource_streamer); \ 703 func(has_runtime_pm); \ 704 func(has_snoop); \ 705 func(cursor_needs_physical); \ 706 func(hws_needs_physical); \ 707 func(overlay_needs_physical); \ 708 func(supports_tv); \ 709 func(has_decoupled_mmio) 710 711 struct sseu_dev_info { 712 u8 slice_mask; 713 u8 subslice_mask; 714 u8 eu_total; 715 u8 eu_per_subslice; 716 u8 min_eu_in_pool; 717 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? 
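 * A set bit marks a subslice with 7 EUs, so a check looks roughly like:
 *
 *	if (sseu->subslice_7eu[slice] & BIT(ss))
 *		... subslice 'ss' of 'slice' has 7 EUs ...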
*/ 718 u8 subslice_7eu[3]; 719 u8 has_slice_pg:1; 720 u8 has_subslice_pg:1; 721 u8 has_eu_pg:1; 722 }; 723 724 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) 725 { 726 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); 727 } 728 729 struct intel_device_info { 730 u32 display_mmio_offset; 731 u16 device_id; 732 u8 num_pipes; 733 u8 num_sprites[I915_MAX_PIPES]; 734 u8 gen; 735 u16 gen_mask; 736 u8 ring_mask; /* Rings supported by the HW */ 737 u8 num_rings; 738 #define DEFINE_FLAG(name) u8 name:1 739 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); 740 #undef DEFINE_FLAG 741 u16 ddb_size; /* in blocks */ 742 /* Register offsets for the various display pipes and transcoders */ 743 int pipe_offsets[I915_MAX_TRANSCODERS]; 744 int trans_offsets[I915_MAX_TRANSCODERS]; 745 int palette_offsets[I915_MAX_PIPES]; 746 int cursor_offsets[I915_MAX_PIPES]; 747 748 /* Slice/subslice/EU info */ 749 struct sseu_dev_info sseu; 750 751 struct color_luts { 752 u16 degamma_lut_size; 753 u16 gamma_lut_size; 754 } color; 755 }; 756 757 struct intel_display_error_state; 758 759 struct drm_i915_error_state { 760 struct kref ref; 761 struct timeval time; 762 struct timeval boottime; 763 struct timeval uptime; 764 765 struct drm_i915_private *i915; 766 767 char error_msg[128]; 768 bool simulated; 769 int iommu; 770 u32 reset_count; 771 u32 suspend_count; 772 struct intel_device_info device_info; 773 774 /* Generic register state */ 775 u32 eir; 776 u32 pgtbl_er; 777 u32 ier; 778 u32 gtier[4]; 779 u32 ccid; 780 u32 derrmr; 781 u32 forcewake; 782 u32 error; /* gen6+ */ 783 u32 err_int; /* gen7 */ 784 u32 fault_data0; /* gen8, gen9 */ 785 u32 fault_data1; /* gen8, gen9 */ 786 u32 done_reg; 787 u32 gac_eco; 788 u32 gam_ecochk; 789 u32 gab_ctl; 790 u32 gfx_mode; 791 792 u64 fence[I915_MAX_NUM_FENCES]; 793 struct intel_overlay_error_state *overlay; 794 struct intel_display_error_state *display; 795 struct drm_i915_error_object *semaphore; 796 struct drm_i915_error_object *guc_log; 797 798 struct drm_i915_error_engine { 799 int engine_id; 800 /* Software tracked state */ 801 bool waiting; 802 int num_waiters; 803 int hangcheck_score; 804 enum intel_engine_hangcheck_action hangcheck_action; 805 struct i915_address_space *vm; 806 int num_requests; 807 808 /* position of active request inside the ring */ 809 u32 rq_head, rq_post, rq_tail; 810 811 /* our own tracking of ring head and tail */ 812 u32 cpu_ring_head; 813 u32 cpu_ring_tail; 814 815 u32 last_seqno; 816 817 /* Register state */ 818 u32 start; 819 u32 tail; 820 u32 head; 821 u32 ctl; 822 u32 mode; 823 u32 hws; 824 u32 ipeir; 825 u32 ipehr; 826 u32 bbstate; 827 u32 instpm; 828 u32 instps; 829 u32 seqno; 830 u64 bbaddr; 831 u64 acthd; 832 u32 fault_reg; 833 u64 faddr; 834 u32 rc_psmi; /* sleep state */ 835 u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; 836 struct intel_instdone instdone; 837 838 struct drm_i915_error_object { 839 u64 gtt_offset; 840 u64 gtt_size; 841 int page_count; 842 int unused; 843 u32 *pages[0]; 844 } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; 845 846 struct drm_i915_error_object *wa_ctx; 847 848 struct drm_i915_error_request { 849 long jiffies; 850 pid_t pid; 851 u32 context; 852 u32 seqno; 853 u32 head; 854 u32 tail; 855 } *requests, execlist[2]; 856 857 struct drm_i915_error_waiter { 858 char comm[TASK_COMM_LEN]; 859 pid_t pid; 860 u32 seqno; 861 } *waiters; 862 863 struct { 864 u32 gfx_mode; 865 union { 866 u64 pdp[4]; 867 u32 pp_dir_base; 868 }; 869 } vm_info; 870 871 pid_t pid; 872 char 
		comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting any more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		   initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
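 *
 * Userspace refers to a context through @user_handle; the lookup (under
 * the appropriate lock) is roughly:
 *
 *	ctx = idr_find(&file_priv->context_idr, user_handle);
 *
 * with DEFAULT_CONTEXT_HANDLE naming the default context of each file.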
944 */ 945 struct i915_gem_context { 946 struct kref ref; 947 struct drm_i915_private *i915; 948 struct drm_i915_file_private *file_priv; 949 struct i915_hw_ppgtt *ppgtt; 950 struct pid *pid; 951 const char *name; 952 953 struct i915_ctx_hang_stats hang_stats; 954 955 unsigned long flags; 956 #define CONTEXT_NO_ZEROMAP BIT(0) 957 #define CONTEXT_NO_ERROR_CAPTURE BIT(1) 958 959 /* Unique identifier for this context, used by the hw for tracking */ 960 unsigned int hw_id; 961 u32 user_handle; 962 int priority; /* greater priorities are serviced first */ 963 964 u32 ggtt_alignment; 965 966 struct intel_context { 967 struct i915_vma *state; 968 struct intel_ring *ring; 969 uint32_t *lrc_reg_state; 970 u64 lrc_desc; 971 int pin_count; 972 bool initialised; 973 } engine[I915_NUM_ENGINES]; 974 u32 ring_size; 975 u32 desc_template; 976 struct atomic_notifier_head status_notifier; 977 bool execlists_force_single_submission; 978 979 struct list_head link; 980 981 u8 remap_slice; 982 bool closed:1; 983 }; 984 985 enum fb_op_origin { 986 ORIGIN_GTT, 987 ORIGIN_CPU, 988 ORIGIN_CS, 989 ORIGIN_FLIP, 990 ORIGIN_DIRTYFB, 991 }; 992 993 struct intel_fbc { 994 /* This is always the inner lock when overlapping with struct_mutex and 995 * it's the outer lock when overlapping with stolen_lock. */ 996 struct mutex lock; 997 unsigned threshold; 998 unsigned int possible_framebuffer_bits; 999 unsigned int busy_bits; 1000 unsigned int visible_pipes_mask; 1001 struct intel_crtc *crtc; 1002 1003 struct drm_mm_node compressed_fb; 1004 struct drm_mm_node *compressed_llb; 1005 1006 bool false_color; 1007 1008 bool enabled; 1009 bool active; 1010 1011 bool underrun_detected; 1012 struct work_struct underrun_work; 1013 1014 struct intel_fbc_state_cache { 1015 struct i915_vma *vma; 1016 1017 struct { 1018 unsigned int mode_flags; 1019 uint32_t hsw_bdw_pixel_rate; 1020 } crtc; 1021 1022 struct { 1023 unsigned int rotation; 1024 int src_w; 1025 int src_h; 1026 bool visible; 1027 } plane; 1028 1029 struct { 1030 uint32_t pixel_format; 1031 unsigned int stride; 1032 } fb; 1033 } state_cache; 1034 1035 struct intel_fbc_reg_params { 1036 struct i915_vma *vma; 1037 1038 struct { 1039 enum pipe pipe; 1040 enum plane plane; 1041 unsigned int fence_y_offset; 1042 } crtc; 1043 1044 struct { 1045 uint32_t pixel_format; 1046 unsigned int stride; 1047 } fb; 1048 1049 int cfb_size; 1050 } params; 1051 1052 struct intel_fbc_work { 1053 bool scheduled; 1054 u32 scheduled_vblank; 1055 struct work_struct work; 1056 } work; 1057 1058 const char *no_fbc_reason; 1059 }; 1060 1061 /** 1062 * HIGH_RR is the highest eDP panel refresh rate read from EDID 1063 * LOW_RR is the lowest eDP panel refresh rate found from EDID 1064 * parsing for same resolution. 
1065 */ 1066 enum drrs_refresh_rate_type { 1067 DRRS_HIGH_RR, 1068 DRRS_LOW_RR, 1069 DRRS_MAX_RR, /* RR count */ 1070 }; 1071 1072 enum drrs_support_type { 1073 DRRS_NOT_SUPPORTED = 0, 1074 STATIC_DRRS_SUPPORT = 1, 1075 SEAMLESS_DRRS_SUPPORT = 2 1076 }; 1077 1078 struct intel_dp; 1079 struct i915_drrs { 1080 struct mutex mutex; 1081 struct delayed_work work; 1082 struct intel_dp *dp; 1083 unsigned busy_frontbuffer_bits; 1084 enum drrs_refresh_rate_type refresh_rate_type; 1085 enum drrs_support_type type; 1086 }; 1087 1088 struct i915_psr { 1089 struct mutex lock; 1090 bool sink_support; 1091 bool source_ok; 1092 struct intel_dp *enabled; 1093 bool active; 1094 struct delayed_work work; 1095 unsigned busy_frontbuffer_bits; 1096 bool psr2_support; 1097 bool aux_frame_sync; 1098 bool link_standby; 1099 }; 1100 1101 enum intel_pch { 1102 PCH_NONE = 0, /* No PCH present */ 1103 PCH_IBX, /* Ibexpeak PCH */ 1104 PCH_CPT, /* Cougarpoint PCH */ 1105 PCH_LPT, /* Lynxpoint PCH */ 1106 PCH_SPT, /* Sunrisepoint PCH */ 1107 PCH_KBP, /* Kabypoint PCH */ 1108 PCH_NOP, 1109 }; 1110 1111 enum intel_sbi_destination { 1112 SBI_ICLK, 1113 SBI_MPHY, 1114 }; 1115 1116 #define QUIRK_PIPEA_FORCE (1<<0) 1117 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 1118 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 1119 #define QUIRK_BACKLIGHT_PRESENT (1<<3) 1120 #define QUIRK_PIPEB_FORCE (1<<4) 1121 #define QUIRK_PIN_SWIZZLED_PAGES (1<<5) 1122 1123 struct intel_fbdev; 1124 struct intel_fbc_work; 1125 1126 struct intel_gmbus { 1127 struct i2c_adapter adapter; 1128 #define GMBUS_FORCE_BIT_RETRY (1U << 31) 1129 u32 force_bit; 1130 u32 reg0; 1131 i915_reg_t gpio_reg; 1132 struct i2c_algo_bit_data bit_algo; 1133 struct drm_i915_private *dev_priv; 1134 }; 1135 1136 struct i915_suspend_saved_registers { 1137 u32 saveDSPARB; 1138 u32 saveFBC_CONTROL; 1139 u32 saveCACHE_MODE_0; 1140 u32 saveMI_ARB_STATE; 1141 u32 saveSWF0[16]; 1142 u32 saveSWF1[16]; 1143 u32 saveSWF3[3]; 1144 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 1145 u32 savePCH_PORT_HOTPLUG; 1146 u16 saveGCDGMBUS; 1147 }; 1148 1149 struct vlv_s0ix_state { 1150 /* GAM */ 1151 u32 wr_watermark; 1152 u32 gfx_prio_ctrl; 1153 u32 arb_mode; 1154 u32 gfx_pend_tlb0; 1155 u32 gfx_pend_tlb1; 1156 u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; 1157 u32 media_max_req_count; 1158 u32 gfx_max_req_count; 1159 u32 render_hwsp; 1160 u32 ecochk; 1161 u32 bsd_hwsp; 1162 u32 blt_hwsp; 1163 u32 tlb_rd_addr; 1164 1165 /* MBC */ 1166 u32 g3dctl; 1167 u32 gsckgctl; 1168 u32 mbctl; 1169 1170 /* GCP */ 1171 u32 ucgctl1; 1172 u32 ucgctl3; 1173 u32 rcgctl1; 1174 u32 rcgctl2; 1175 u32 rstctl; 1176 u32 misccpctl; 1177 1178 /* GPM */ 1179 u32 gfxpause; 1180 u32 rpdeuhwtc; 1181 u32 rpdeuc; 1182 u32 ecobus; 1183 u32 pwrdwnupctl; 1184 u32 rp_down_timeout; 1185 u32 rp_deucsw; 1186 u32 rcubmabdtmr; 1187 u32 rcedata; 1188 u32 spare2gh; 1189 1190 /* Display 1 CZ domain */ 1191 u32 gt_imr; 1192 u32 gt_ier; 1193 u32 pm_imr; 1194 u32 pm_ier; 1195 u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; 1196 1197 /* GT SA CZ domain */ 1198 u32 tilectl; 1199 u32 gt_fifoctl; 1200 u32 gtlc_wake_ctrl; 1201 u32 gtlc_survive; 1202 u32 pmwgicz; 1203 1204 /* Display 2 CZ domain */ 1205 u32 gu_ctl0; 1206 u32 gu_ctl1; 1207 u32 pcbr; 1208 u32 clock_gate_dis2; 1209 }; 1210 1211 struct intel_rps_ei { 1212 u32 cz_clock; 1213 u32 render_c0; 1214 u32 media_c0; 1215 }; 1216 1217 struct intel_gen6_power_mgmt { 1218 /* 1219 * work, interrupts_enabled and pm_iir are protected by 1220 * dev_priv->irq_lock 1221 */ 1222 struct work_struct work; 1223 bool interrupts_enabled; 1224 
u32 pm_iir; 1225 1226 /* PM interrupt bits that should never be masked */ 1227 u32 pm_intr_keep; 1228 1229 /* Frequencies are stored in potentially platform dependent multiples. 1230 * In other words, *_freq needs to be multiplied by X to be interesting. 1231 * Soft limits are those which are used for the dynamic reclocking done 1232 * by the driver (raise frequencies under heavy loads, and lower for 1233 * lighter loads). Hard limits are those imposed by the hardware. 1234 * 1235 * A distinction is made for overclocking, which is never enabled by 1236 * default, and is considered to be above the hard limit if it's 1237 * possible at all. 1238 */ 1239 u8 cur_freq; /* Current frequency (cached, may not == HW) */ 1240 u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ 1241 u8 max_freq_softlimit; /* Max frequency permitted by the driver */ 1242 u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ 1243 u8 min_freq; /* AKA RPn. Minimum frequency */ 1244 u8 boost_freq; /* Frequency to request when wait boosting */ 1245 u8 idle_freq; /* Frequency to request when we are idle */ 1246 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1247 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1248 u8 rp0_freq; /* Non-overclocked max frequency. */ 1249 u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ 1250 1251 u8 up_threshold; /* Current %busy required to uplock */ 1252 u8 down_threshold; /* Current %busy required to downclock */ 1253 1254 int last_adj; 1255 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1256 1257 spinlock_t client_lock; 1258 struct list_head clients; 1259 bool client_boost; 1260 1261 bool enabled; 1262 struct delayed_work autoenable_work; 1263 unsigned boosts; 1264 1265 /* manual wa residency calculations */ 1266 struct intel_rps_ei up_ei, down_ei; 1267 1268 /* 1269 * Protects RPS/RC6 register access and PCU communication. 1270 * Must be taken after struct_mutex if nested. Note that 1271 * this lock may be held for long periods of time when 1272 * talking to hw - so only take it when talking to hw! 1273 */ 1274 struct mutex hw_lock; 1275 }; 1276 1277 /* defined intel_pm.c */ 1278 extern spinlock_t mchdev_lock; 1279 1280 struct intel_ilk_power_mgmt { 1281 u8 cur_delay; 1282 u8 min_delay; 1283 u8 max_delay; 1284 u8 fmax; 1285 u8 fstart; 1286 1287 u64 last_count1; 1288 unsigned long last_time1; 1289 unsigned long chipset_power; 1290 u64 last_count2; 1291 u64 last_time2; 1292 unsigned long gfx_power; 1293 u8 corr; 1294 1295 int c_m; 1296 int r_t; 1297 }; 1298 1299 struct drm_i915_private; 1300 struct i915_power_well; 1301 1302 struct i915_power_well_ops { 1303 /* 1304 * Synchronize the well's hw state to match the current sw state, for 1305 * example enable/disable it based on the current refcount. Called 1306 * during driver init and resume time, possibly after first calling 1307 * the enable/disable handlers. 1308 */ 1309 void (*sync_hw)(struct drm_i915_private *dev_priv, 1310 struct i915_power_well *power_well); 1311 /* 1312 * Enable the well and resources that depend on it (for example 1313 * interrupts located on the well). Called after the 0->1 refcount 1314 * transition. 1315 */ 1316 void (*enable)(struct drm_i915_private *dev_priv, 1317 struct i915_power_well *power_well); 1318 /* 1319 * Disable the well and resources that depend on it. Called after 1320 * the 1->0 refcount transition. 1321 */ 1322 void (*disable)(struct drm_i915_private *dev_priv, 1323 struct i915_power_well *power_well); 1324 /* Returns the hw enabled state. 
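 * Callers normally go through the ops table, e.g.:
 *
 *	if (power_well->ops->is_enabled(dev_priv, power_well))
 *		...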
*/ 1325 bool (*is_enabled)(struct drm_i915_private *dev_priv, 1326 struct i915_power_well *power_well); 1327 }; 1328 1329 /* Power well structure for haswell */ 1330 struct i915_power_well { 1331 const char *name; 1332 bool always_on; 1333 /* power well enable/disable usage count */ 1334 int count; 1335 /* cached hw enabled state */ 1336 bool hw_enabled; 1337 unsigned long domains; 1338 /* unique identifier for this power well */ 1339 unsigned long id; 1340 /* 1341 * Arbitraty data associated with this power well. Platform and power 1342 * well specific. 1343 */ 1344 unsigned long data; 1345 const struct i915_power_well_ops *ops; 1346 }; 1347 1348 struct i915_power_domains { 1349 /* 1350 * Power wells needed for initialization at driver init and suspend 1351 * time are on. They are kept on until after the first modeset. 1352 */ 1353 bool init_power_on; 1354 bool initializing; 1355 int power_well_count; 1356 1357 struct mutex lock; 1358 int domain_use_count[POWER_DOMAIN_NUM]; 1359 struct i915_power_well *power_wells; 1360 }; 1361 1362 #define MAX_L3_SLICES 2 1363 struct intel_l3_parity { 1364 u32 *remap_info[MAX_L3_SLICES]; 1365 struct work_struct error_work; 1366 int which_slice; 1367 }; 1368 1369 struct i915_gem_mm { 1370 /** Memory allocator for GTT stolen memory */ 1371 struct drm_mm stolen; 1372 /** Protects the usage of the GTT stolen memory allocator. This is 1373 * always the inner lock when overlapping with struct_mutex. */ 1374 struct mutex stolen_lock; 1375 1376 /** List of all objects in gtt_space. Used to restore gtt 1377 * mappings on resume */ 1378 struct list_head bound_list; 1379 /** 1380 * List of objects which are not bound to the GTT (thus 1381 * are idle and not used by the GPU). These objects may or may 1382 * not actually have any pages attached. 1383 */ 1384 struct list_head unbound_list; 1385 1386 /** List of all objects in gtt_space, currently mmaped by userspace. 1387 * All objects within this list must also be on bound_list. 1388 */ 1389 struct list_head userfault_list; 1390 1391 /** 1392 * List of objects which are pending destruction. 1393 */ 1394 struct llist_head free_list; 1395 struct work_struct free_work; 1396 1397 /** Usable portion of the GTT for GEM */ 1398 unsigned long stolen_base; /* limited to low memory (32-bit) */ 1399 1400 /** PPGTT used for aliasing the PPGTT with the GTT */ 1401 struct i915_hw_ppgtt *aliasing_ppgtt; 1402 1403 struct notifier_block oom_notifier; 1404 struct notifier_block vmap_notifier; 1405 struct shrinker shrinker; 1406 1407 /** LRU list of objects with fence regs on them. */ 1408 struct list_head fence_list; 1409 1410 /** 1411 * Are we in a non-interruptible section of code like 1412 * modesetting? 
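 *
 * Code that must not be interrupted (e.g. during a modeset) may bracket
 * itself with something like:
 *
 *	dev_priv->mm.interruptible = false;
 *	... uninterruptible GEM waits ...
 *	dev_priv->mm.interruptible = true;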
1413 */ 1414 bool interruptible; 1415 1416 /* the indicator for dispatch video commands on two BSD rings */ 1417 atomic_t bsd_engine_dispatch_index; 1418 1419 /** Bit 6 swizzling required for X tiling */ 1420 uint32_t bit_6_swizzle_x; 1421 /** Bit 6 swizzling required for Y tiling */ 1422 uint32_t bit_6_swizzle_y; 1423 1424 /* accounting, useful for userland debugging */ 1425 spinlock_t object_stat_lock; 1426 u64 object_memory; 1427 u32 object_count; 1428 }; 1429 1430 struct drm_i915_error_state_buf { 1431 struct drm_i915_private *i915; 1432 unsigned bytes; 1433 unsigned size; 1434 int err; 1435 u8 *buf; 1436 loff_t start; 1437 loff_t pos; 1438 }; 1439 1440 struct i915_error_state_file_priv { 1441 struct drm_device *dev; 1442 struct drm_i915_error_state *error; 1443 }; 1444 1445 #define I915_RESET_TIMEOUT (10 * HZ) /* 10s */ 1446 #define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */ 1447 1448 struct i915_gpu_error { 1449 /* For hangcheck timer */ 1450 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 1451 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 1452 /* Hang gpu twice in this window and your context gets banned */ 1453 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1454 1455 struct delayed_work hangcheck_work; 1456 1457 /* For reset and error_state handling. */ 1458 spinlock_t lock; 1459 /* Protected by the above dev->gpu_error.lock. */ 1460 struct drm_i915_error_state *first_error; 1461 1462 unsigned long missed_irq_rings; 1463 1464 /** 1465 * State variable controlling the reset flow and count 1466 * 1467 * This is a counter which gets incremented when reset is triggered, 1468 * 1469 * Before the reset commences, the I915_RESET_IN_PROGRESS bit is set 1470 * meaning that any waiters holding onto the struct_mutex should 1471 * relinquish the lock immediately in order for the reset to start. 1472 * 1473 * If reset is not completed succesfully, the I915_WEDGE bit is 1474 * set meaning that hardware is terminally sour and there is no 1475 * recovery. All waiters on the reset_queue will be woken when 1476 * that happens. 1477 * 1478 * This counter is used by the wait_seqno code to notice that reset 1479 * event happened and it needs to restart the entire ioctl (since most 1480 * likely the seqno it waited for won't ever signal anytime soon). 1481 * 1482 * This is important for lock-free wait paths, where no contended lock 1483 * naturally enforces the correct ordering between the bail-out of the 1484 * waiter and the gpu reset work code. 1485 */ 1486 unsigned long reset_count; 1487 1488 unsigned long flags; 1489 #define I915_RESET_IN_PROGRESS 0 1490 #define I915_WEDGED (BITS_PER_LONG - 1) 1491 1492 /** 1493 * Waitqueue to signal when a hang is detected. Used to for waiters 1494 * to release the struct_mutex for the reset to procede. 1495 */ 1496 wait_queue_head_t wait_queue; 1497 1498 /** 1499 * Waitqueue to signal when the reset has completed. Used by clients 1500 * that wait for dev_priv->mm.wedged to settle. 1501 */ 1502 wait_queue_head_t reset_queue; 1503 1504 /* For missed irq/seqno simulation. 
*/ 1505 unsigned long test_irq_rings; 1506 }; 1507 1508 enum modeset_restore { 1509 MODESET_ON_LID_OPEN, 1510 MODESET_DONE, 1511 MODESET_SUSPENDED, 1512 }; 1513 1514 #define DP_AUX_A 0x40 1515 #define DP_AUX_B 0x10 1516 #define DP_AUX_C 0x20 1517 #define DP_AUX_D 0x30 1518 1519 #define DDC_PIN_B 0x05 1520 #define DDC_PIN_C 0x04 1521 #define DDC_PIN_D 0x06 1522 1523 struct ddi_vbt_port_info { 1524 /* 1525 * This is an index in the HDMI/DVI DDI buffer translation table. 1526 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't 1527 * populate this field. 1528 */ 1529 #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff 1530 uint8_t hdmi_level_shift; 1531 1532 uint8_t supports_dvi:1; 1533 uint8_t supports_hdmi:1; 1534 uint8_t supports_dp:1; 1535 1536 uint8_t alternate_aux_channel; 1537 uint8_t alternate_ddc_pin; 1538 1539 uint8_t dp_boost_level; 1540 uint8_t hdmi_boost_level; 1541 }; 1542 1543 enum psr_lines_to_wait { 1544 PSR_0_LINES_TO_WAIT = 0, 1545 PSR_1_LINE_TO_WAIT, 1546 PSR_4_LINES_TO_WAIT, 1547 PSR_8_LINES_TO_WAIT 1548 }; 1549 1550 struct intel_vbt_data { 1551 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1552 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1553 1554 /* Feature bits */ 1555 unsigned int int_tv_support:1; 1556 unsigned int lvds_dither:1; 1557 unsigned int lvds_vbt:1; 1558 unsigned int int_crt_support:1; 1559 unsigned int lvds_use_ssc:1; 1560 unsigned int display_clock_mode:1; 1561 unsigned int fdi_rx_polarity_inverted:1; 1562 unsigned int panel_type:4; 1563 int lvds_ssc_freq; 1564 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 1565 1566 enum drrs_support_type drrs_type; 1567 1568 struct { 1569 int rate; 1570 int lanes; 1571 int preemphasis; 1572 int vswing; 1573 bool low_vswing; 1574 bool initialized; 1575 bool support; 1576 int bpp; 1577 struct edp_power_seq pps; 1578 } edp; 1579 1580 struct { 1581 bool full_link; 1582 bool require_aux_wakeup; 1583 int idle_frames; 1584 enum psr_lines_to_wait lines_to_wait; 1585 int tp1_wakeup_time; 1586 int tp2_tp3_wakeup_time; 1587 } psr; 1588 1589 struct { 1590 u16 pwm_freq_hz; 1591 bool present; 1592 bool active_low_pwm; 1593 u8 min_brightness; /* min_brightness/255 of max */ 1594 enum intel_backlight_type type; 1595 } backlight; 1596 1597 /* MIPI DSI */ 1598 struct { 1599 u16 panel_id; 1600 struct mipi_config *config; 1601 struct mipi_pps_data *pps; 1602 u8 seq_version; 1603 u32 size; 1604 u8 *data; 1605 const u8 *sequence[MIPI_SEQ_MAX]; 1606 } dsi; 1607 1608 int crt_ddc_pin; 1609 1610 int child_dev_num; 1611 union child_device_config *child_dev; 1612 1613 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; 1614 struct sdvo_device_mapping sdvo_mappings[2]; 1615 }; 1616 1617 enum intel_ddb_partitioning { 1618 INTEL_DDB_PART_1_2, 1619 INTEL_DDB_PART_5_6, /* IVB+ */ 1620 }; 1621 1622 struct intel_wm_level { 1623 bool enable; 1624 uint32_t pri_val; 1625 uint32_t spr_val; 1626 uint32_t cur_val; 1627 uint32_t fbc_val; 1628 }; 1629 1630 struct ilk_wm_values { 1631 uint32_t wm_pipe[3]; 1632 uint32_t wm_lp[3]; 1633 uint32_t wm_lp_spr[3]; 1634 uint32_t wm_linetime[3]; 1635 bool enable_fbc_wm; 1636 enum intel_ddb_partitioning partitioning; 1637 }; 1638 1639 struct vlv_pipe_wm { 1640 uint16_t primary; 1641 uint16_t sprite[2]; 1642 uint8_t cursor; 1643 }; 1644 1645 struct vlv_sr_wm { 1646 uint16_t plane; 1647 uint8_t cursor; 1648 }; 1649 1650 struct vlv_wm_values { 1651 struct vlv_pipe_wm pipe[3]; 1652 struct vlv_sr_wm sr; 1653 struct { 1654 uint8_t cursor; 1655 uint8_t sprite[2]; 1656 uint8_t 
primary; 1657 } ddl[3]; 1658 uint8_t level; 1659 bool cxsr; 1660 }; 1661 1662 struct skl_ddb_entry { 1663 uint16_t start, end; /* in number of blocks, 'end' is exclusive */ 1664 }; 1665 1666 static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) 1667 { 1668 return entry->end - entry->start; 1669 } 1670 1671 static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, 1672 const struct skl_ddb_entry *e2) 1673 { 1674 if (e1->start == e2->start && e1->end == e2->end) 1675 return true; 1676 1677 return false; 1678 } 1679 1680 struct skl_ddb_allocation { 1681 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1682 struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1683 }; 1684 1685 struct skl_wm_values { 1686 unsigned dirty_pipes; 1687 struct skl_ddb_allocation ddb; 1688 }; 1689 1690 struct skl_wm_level { 1691 bool plane_en; 1692 uint16_t plane_res_b; 1693 uint8_t plane_res_l; 1694 }; 1695 1696 /* 1697 * This struct helps tracking the state needed for runtime PM, which puts the 1698 * device in PCI D3 state. Notice that when this happens, nothing on the 1699 * graphics device works, even register access, so we don't get interrupts nor 1700 * anything else. 1701 * 1702 * Every piece of our code that needs to actually touch the hardware needs to 1703 * either call intel_runtime_pm_get or call intel_display_power_get with the 1704 * appropriate power domain. 1705 * 1706 * Our driver uses the autosuspend delay feature, which means we'll only really 1707 * suspend if we stay with zero refcount for a certain amount of time. The 1708 * default value is currently very conservative (see intel_runtime_pm_enable), but 1709 * it can be changed with the standard runtime PM files from sysfs. 1710 * 1711 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1712 * goes back to false exactly before we reenable the IRQs. We use this variable 1713 * to check if someone is trying to enable/disable IRQs while they're supposed 1714 * to be disabled. This shouldn't happen and we'll print some error messages in 1715 * case it happens. 1716 * 1717 * For more, read the Documentation/power/runtime_pm.txt. 1718 */ 1719 struct i915_runtime_pm { 1720 atomic_t wakeref_count; 1721 bool suspended; 1722 bool irqs_enabled; 1723 }; 1724 1725 enum intel_pipe_crc_source { 1726 INTEL_PIPE_CRC_SOURCE_NONE, 1727 INTEL_PIPE_CRC_SOURCE_PLANE1, 1728 INTEL_PIPE_CRC_SOURCE_PLANE2, 1729 INTEL_PIPE_CRC_SOURCE_PF, 1730 INTEL_PIPE_CRC_SOURCE_PIPE, 1731 /* TV/DP on pre-gen5/vlv can't use the pipe source. */ 1732 INTEL_PIPE_CRC_SOURCE_TV, 1733 INTEL_PIPE_CRC_SOURCE_DP_B, 1734 INTEL_PIPE_CRC_SOURCE_DP_C, 1735 INTEL_PIPE_CRC_SOURCE_DP_D, 1736 INTEL_PIPE_CRC_SOURCE_AUTO, 1737 INTEL_PIPE_CRC_SOURCE_MAX, 1738 }; 1739 1740 struct intel_pipe_crc_entry { 1741 uint32_t frame; 1742 uint32_t crc[5]; 1743 }; 1744 1745 #define INTEL_PIPE_CRC_ENTRIES_NR 128 1746 struct intel_pipe_crc { 1747 spinlock_t lock; 1748 bool opened; /* exclusive access to the result file */ 1749 struct intel_pipe_crc_entry *entries; 1750 enum intel_pipe_crc_source source; 1751 int head, tail; 1752 wait_queue_head_t wq; 1753 }; 1754 1755 struct i915_frontbuffer_tracking { 1756 spinlock_t lock; 1757 1758 /* 1759 * Tracking bits for delayed frontbuffer flushing du to gpu activity or 1760 * scheduled flips. 
1761 */ 1762 unsigned busy_bits; 1763 unsigned flip_bits; 1764 }; 1765 1766 struct i915_wa_reg { 1767 i915_reg_t addr; 1768 u32 value; 1769 /* bitmask representing WA bits */ 1770 u32 mask; 1771 }; 1772 1773 /* 1774 * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only 1775 * allowing it for RCS as we don't foresee any requirement of having 1776 * a whitelist for other engines. When it is really required for 1777 * other engines then the limit need to be increased. 1778 */ 1779 #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) 1780 1781 struct i915_workarounds { 1782 struct i915_wa_reg reg[I915_MAX_WA_REGS]; 1783 u32 count; 1784 u32 hw_whitelist_count[I915_NUM_ENGINES]; 1785 }; 1786 1787 struct i915_virtual_gpu { 1788 bool active; 1789 }; 1790 1791 /* used in computing the new watermarks state */ 1792 struct intel_wm_config { 1793 unsigned int num_pipes_active; 1794 bool sprites_enabled; 1795 bool sprites_scaled; 1796 }; 1797 1798 struct drm_i915_private { 1799 struct drm_device drm; 1800 1801 struct kmem_cache *objects; 1802 struct kmem_cache *vmas; 1803 struct kmem_cache *requests; 1804 struct kmem_cache *dependencies; 1805 1806 const struct intel_device_info info; 1807 1808 int relative_constants_mode; 1809 1810 void __iomem *regs; 1811 1812 struct intel_uncore uncore; 1813 1814 struct i915_virtual_gpu vgpu; 1815 1816 struct intel_gvt *gvt; 1817 1818 struct intel_guc guc; 1819 1820 struct intel_csr csr; 1821 1822 struct intel_gmbus gmbus[GMBUS_NUM_PINS]; 1823 1824 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 1825 * controller on different i2c buses. */ 1826 struct mutex gmbus_mutex; 1827 1828 /** 1829 * Base address of the gmbus and gpio block. 1830 */ 1831 uint32_t gpio_mmio_base; 1832 1833 /* MMIO base address for MIPI regs */ 1834 uint32_t mipi_mmio_base; 1835 1836 uint32_t psr_mmio_base; 1837 1838 uint32_t pps_mmio_base; 1839 1840 wait_queue_head_t gmbus_wait_queue; 1841 1842 struct pci_dev *bridge_dev; 1843 struct i915_gem_context *kernel_context; 1844 struct intel_engine_cs *engine[I915_NUM_ENGINES]; 1845 struct i915_vma *semaphore; 1846 1847 struct drm_dma_handle *status_page_dmah; 1848 struct resource mch_res; 1849 1850 /* protects the irq masks */ 1851 spinlock_t irq_lock; 1852 1853 /* protects the mmio flip data */ 1854 spinlock_t mmio_flip_lock; 1855 1856 bool display_irqs_enabled; 1857 1858 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. 
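 *
 * E.g. the DP AUX code can bump the request around a transfer:
 *
 *	pm_qos_update_request(&dev_priv->pm_qos, 0);
 *	... do the aux transfer ...
 *	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);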
*/ 1859 struct pm_qos_request pm_qos; 1860 1861 /* Sideband mailbox protection */ 1862 struct mutex sb_lock; 1863 1864 /** Cached value of IMR to avoid reads in updating the bitfield */ 1865 union { 1866 u32 irq_mask; 1867 u32 de_irq_mask[I915_MAX_PIPES]; 1868 }; 1869 u32 gt_irq_mask; 1870 u32 pm_imr; 1871 u32 pm_ier; 1872 u32 pm_rps_events; 1873 u32 pm_guc_events; 1874 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1875 1876 struct i915_hotplug hotplug; 1877 struct intel_fbc fbc; 1878 struct i915_drrs drrs; 1879 struct intel_opregion opregion; 1880 struct intel_vbt_data vbt; 1881 1882 bool preserve_bios_swizzle; 1883 1884 /* overlay */ 1885 struct intel_overlay *overlay; 1886 1887 /* backlight registers and fields in struct intel_panel */ 1888 struct mutex backlight_lock; 1889 1890 /* LVDS info */ 1891 bool no_aux_handshake; 1892 1893 /* protects panel power sequencer state */ 1894 struct mutex pps_mutex; 1895 1896 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1897 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1898 1899 unsigned int fsb_freq, mem_freq, is_ddr3; 1900 unsigned int skl_preferred_vco_freq; 1901 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1902 unsigned int max_dotclk_freq; 1903 unsigned int rawclk_freq; 1904 unsigned int hpll_freq; 1905 unsigned int czclk_freq; 1906 1907 struct { 1908 unsigned int vco, ref; 1909 } cdclk_pll; 1910 1911 /** 1912 * wq - Driver workqueue for GEM. 1913 * 1914 * NOTE: Work items scheduled here are not allowed to grab any modeset 1915 * locks, for otherwise the flushing done in the pageflip code will 1916 * result in deadlocks. 1917 */ 1918 struct workqueue_struct *wq; 1919 1920 /* Display functions */ 1921 struct drm_i915_display_funcs display; 1922 1923 /* PCH chipset type */ 1924 enum intel_pch pch_type; 1925 unsigned short pch_id; 1926 1927 unsigned long quirks; 1928 1929 enum modeset_restore modeset_restore; 1930 struct mutex modeset_restore_lock; 1931 struct drm_atomic_state *modeset_restore_state; 1932 struct drm_modeset_acquire_ctx reset_ctx; 1933 1934 struct list_head vm_list; /* Global list of all address spaces */ 1935 struct i915_ggtt ggtt; /* VM representing the global address space */ 1936 1937 struct i915_gem_mm mm; 1938 DECLARE_HASHTABLE(mm_structs, 7); 1939 struct mutex mm_lock; 1940 1941 /* The hw wants to have a stable context identifier for the lifetime 1942 * of the context (for OA, PASID, faults, etc). This is limited 1943 * in execlists to 21 bits. 1944 */ 1945 struct ida context_hw_ida; 1946 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ 1947 1948 /* Kernel Modesetting */ 1949 1950 struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1951 struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 1952 wait_queue_head_t pending_flip_queue; 1953 1954 #ifdef CONFIG_DEBUG_FS 1955 struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; 1956 #endif 1957 1958 /* dpll and cdclk state is protected by connection_mutex */ 1959 int num_shared_dpll; 1960 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1961 const struct intel_dpll_mgr *dpll_mgr; 1962 1963 /* 1964 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. 1965 * Must be global rather than per dpll, because on some platforms 1966 * plls share registers. 
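 *
 * E.g.:
 *
 *	mutex_lock(&dev_priv->dpll_lock);
 *	... program the shared DPLL registers ...
 *	mutex_unlock(&dev_priv->dpll_lock);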
1967 */ 1968 struct mutex dpll_lock; 1969 1970 unsigned int active_crtcs; 1971 unsigned int min_pixclk[I915_MAX_PIPES]; 1972 1973 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1974 1975 struct i915_workarounds workarounds; 1976 1977 struct i915_frontbuffer_tracking fb_tracking; 1978 1979 struct intel_atomic_helper { 1980 struct llist_head free_list; 1981 struct work_struct free_work; 1982 } atomic_helper; 1983 1984 u16 orig_clock; 1985 1986 bool mchbar_need_disable; 1987 1988 struct intel_l3_parity l3_parity; 1989 1990 /* Cannot be determined by PCIID. You must always read a register. */ 1991 u32 edram_cap; 1992 1993 /* gen6+ rps state */ 1994 struct intel_gen6_power_mgmt rps; 1995 1996 /* ilk-only ips/rps state. Everything in here is protected by the global 1997 * mchdev_lock in intel_pm.c */ 1998 struct intel_ilk_power_mgmt ips; 1999 2000 struct i915_power_domains power_domains; 2001 2002 struct i915_psr psr; 2003 2004 struct i915_gpu_error gpu_error; 2005 2006 struct drm_i915_gem_object *vlv_pctx; 2007 2008 #ifdef CONFIG_DRM_FBDEV_EMULATION 2009 /* list of fbdev register on this device */ 2010 struct intel_fbdev *fbdev; 2011 struct work_struct fbdev_suspend_work; 2012 #endif 2013 2014 struct drm_property *broadcast_rgb_property; 2015 struct drm_property *force_audio_property; 2016 2017 /* hda/i915 audio component */ 2018 struct i915_audio_component *audio_component; 2019 bool audio_component_registered; 2020 /** 2021 * av_mutex - mutex for audio/video sync 2022 * 2023 */ 2024 struct mutex av_mutex; 2025 2026 uint32_t hw_context_size; 2027 struct list_head context_list; 2028 2029 u32 fdi_rx_config; 2030 2031 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ 2032 u32 chv_phy_control; 2033 /* 2034 * Shadows for CHV DPLL_MD regs to keep the state 2035 * checker somewhat working in the presence hardware 2036 * crappiness (can't read out DPLL_MD for pipes B & C). 2037 */ 2038 u32 chv_dpll_md[I915_MAX_PIPES]; 2039 u32 bxt_phy_grc; 2040 2041 u32 suspend_count; 2042 bool suspended_to_idle; 2043 struct i915_suspend_saved_registers regfile; 2044 struct vlv_s0ix_state vlv_s0ix_state; 2045 2046 enum { 2047 I915_SAGV_UNKNOWN = 0, 2048 I915_SAGV_DISABLED, 2049 I915_SAGV_ENABLED, 2050 I915_SAGV_NOT_CONTROLLED 2051 } sagv_status; 2052 2053 struct { 2054 /* 2055 * Raw watermark latency values: 2056 * in 0.1us units for WM0, 2057 * in 0.5us units for WM1+. 2058 */ 2059 /* primary */ 2060 uint16_t pri_latency[5]; 2061 /* sprite */ 2062 uint16_t spr_latency[5]; 2063 /* cursor */ 2064 uint16_t cur_latency[5]; 2065 /* 2066 * Raw watermark memory latency values 2067 * for SKL for all 8 levels 2068 * in 1us units. 2069 */ 2070 uint16_t skl_latency[8]; 2071 2072 /* current hardware state */ 2073 union { 2074 struct ilk_wm_values hw; 2075 struct skl_wm_values skl_hw; 2076 struct vlv_wm_values vlv; 2077 }; 2078 2079 uint8_t max_level; 2080 2081 /* 2082 * Should be held around atomic WM register writing; also 2083 * protects * intel_crtc->wm.active and 2084 * cstate->wm.need_postvbl_update. 2085 */ 2086 struct mutex wm_mutex; 2087 2088 /* 2089 * Set during HW readout of watermarks/DDB. Some platforms 2090 * need to know when we're still using BIOS-provided values 2091 * (which we don't fully trust). 
2092 */ 2093 bool distrust_bios_wm; 2094 } wm; 2095 2096 struct i915_runtime_pm pm; 2097 2098 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 2099 struct { 2100 void (*resume)(struct drm_i915_private *); 2101 void (*cleanup_engine)(struct intel_engine_cs *engine); 2102 2103 struct list_head timelines; 2104 struct i915_gem_timeline global_timeline; 2105 u32 active_requests; 2106 2107 /** 2108 * Is the GPU currently considered idle, or busy executing 2109 * userspace requests? Whilst idle, we allow runtime power 2110 * management to power down the hardware and display clocks. 2111 * In order to reduce the effect on performance, there 2112 * is a slight delay before we do so. 2113 */ 2114 bool awake; 2115 2116 /** 2117 * We leave the user IRQ off as much as possible, 2118 * but this means that requests will finish and never 2119 * be retired once the system goes idle. Set a timer to 2120 * fire periodically while the ring is running. When it 2121 * fires, go retire requests. 2122 */ 2123 struct delayed_work retire_work; 2124 2125 /** 2126 * When we detect an idle GPU, we want to turn on 2127 * powersaving features. So once we see that there 2128 * are no more requests outstanding and no more 2129 * arrive within a small period of time, we fire 2130 * off the idle_work. 2131 */ 2132 struct delayed_work idle_work; 2133 2134 ktime_t last_init_time; 2135 } gt; 2136 2137 /* perform PHY state sanity checks? */ 2138 bool chv_phy_assert[2]; 2139 2140 /* Used to save the pipe-to-encoder mapping for audio */ 2141 struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 2142 2143 /* 2144 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 2145 * will be rejected. Instead look for a better place. 2146 */ 2147 }; 2148 2149 static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2150 { 2151 return container_of(dev, struct drm_i915_private, drm); 2152 } 2153 2154 static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) 2155 { 2156 return to_i915(dev_get_drvdata(kdev)); 2157 } 2158 2159 static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) 2160 { 2161 return container_of(guc, struct drm_i915_private, guc); 2162 } 2163 2164 /* Simple iterator over all initialised engines */ 2165 #define for_each_engine(engine__, dev_priv__, id__) \ 2166 for ((id__) = 0; \ 2167 (id__) < I915_NUM_ENGINES; \ 2168 (id__)++) \ 2169 for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) 2170 2171 #define __mask_next_bit(mask) ({ \ 2172 int __idx = ffs(mask) - 1; \ 2173 mask &= ~BIT(__idx); \ 2174 __idx; \ 2175 }) 2176 2177 /* Iterator over subset of engines selected by mask */ 2178 #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ 2179 for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \ 2180 tmp__ ? (engine__ = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : 0; ) 2181 2182 enum hdmi_force_audio { 2183 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ 2184 HDMI_AUDIO_OFF, /* force turn off HDMI audio */ 2185 HDMI_AUDIO_AUTO, /* trust EDID */ 2186 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 2187 }; 2188 2189 #define I915_GTT_OFFSET_NONE ((u32)-1) 2190 2191 /* 2192 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is 2193 * considered to be the frontbuffer for the given plane interface-wise. This 2194 * doesn't mean that the hw necessarily already scans it out, but that any 2195 * rendering (by the cpu or gpu) will land in the frontbuffer eventually. 
2196 * 2197 * We have one bit per pipe and per scanout plane type. 2198 */ 2199 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 2200 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2201 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ 2202 (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2203 #define INTEL_FRONTBUFFER_CURSOR(pipe) \ 2204 (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2205 #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ 2206 (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2207 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2208 (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) 2209 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2210 (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2211 2212 /* 2213 * Optimised SGL iterator for GEM objects 2214 */ 2215 static __always_inline struct sgt_iter { 2216 struct scatterlist *sgp; 2217 union { 2218 unsigned long pfn; 2219 dma_addr_t dma; 2220 }; 2221 unsigned int curr; 2222 unsigned int max; 2223 } __sgt_iter(struct scatterlist *sgl, bool dma) { 2224 struct sgt_iter s = { .sgp = sgl }; 2225 2226 if (s.sgp) { 2227 s.max = s.curr = s.sgp->offset; 2228 s.max += s.sgp->length; 2229 if (dma) 2230 s.dma = sg_dma_address(s.sgp); 2231 else 2232 s.pfn = page_to_pfn(sg_page(s.sgp)); 2233 } 2234 2235 return s; 2236 } 2237 2238 static inline struct scatterlist *____sg_next(struct scatterlist *sg) 2239 { 2240 ++sg; 2241 if (unlikely(sg_is_chain(sg))) 2242 sg = sg_chain_ptr(sg); 2243 return sg; 2244 } 2245 2246 /** 2247 * __sg_next - return the next scatterlist entry in a list 2248 * @sg: The current sg entry 2249 * 2250 * Description: 2251 * If the entry is the last, return NULL; otherwise, step to the next 2252 * element in the array (@sg@+1). If that's a chain pointer, follow it; 2253 * otherwise just return the pointer to the current element. 2254 **/ 2255 static inline struct scatterlist *__sg_next(struct scatterlist *sg) 2256 { 2257 #ifdef CONFIG_DEBUG_SG 2258 BUG_ON(sg->sg_magic != SG_MAGIC); 2259 #endif 2260 return sg_is_last(sg) ? NULL : ____sg_next(sg); 2261 } 2262 2263 /** 2264 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table 2265 * @__dmap: DMA address (output) 2266 * @__iter: 'struct sgt_iter' (iterator state, internal) 2267 * @__sgt: sg_table to iterate over (input) 2268 */ 2269 #define for_each_sgt_dma(__dmap, __iter, __sgt) \ 2270 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ 2271 ((__dmap) = (__iter).dma + (__iter).curr); \ 2272 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2273 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) 2274 2275 /** 2276 * for_each_sgt_page - iterate over the pages of the given sg_table 2277 * @__pp: page pointer (output) 2278 * @__iter: 'struct sgt_iter' (iterator state, internal) 2279 * @__sgt: sg_table to iterate over (input) 2280 */ 2281 #define for_each_sgt_page(__pp, __iter, __sgt) \ 2282 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ 2283 ((__pp) = (__iter).pfn == 0 ? NULL : \ 2284 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ 2285 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ 2286 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) 2287 2288 /* 2289 * A command that requires special handling by the command parser. 2290 */ 2291 struct drm_i915_cmd_descriptor { 2292 /* 2293 * Flags describing how the command parser processes the command. 
2294 * 2295 * CMD_DESC_FIXED: The command has a fixed length if this is set, 2296 * a length mask if not set 2297 * CMD_DESC_SKIP: The command is allowed but does not follow the 2298 * standard length encoding for the opcode range in 2299 * which it falls 2300 * CMD_DESC_REJECT: The command is never allowed 2301 * CMD_DESC_REGISTER: The command should be checked against the 2302 * register whitelist for the appropriate ring 2303 * CMD_DESC_MASTER: The command is allowed if the submitting process 2304 * is the DRM master 2305 */ 2306 u32 flags; 2307 #define CMD_DESC_FIXED (1<<0) 2308 #define CMD_DESC_SKIP (1<<1) 2309 #define CMD_DESC_REJECT (1<<2) 2310 #define CMD_DESC_REGISTER (1<<3) 2311 #define CMD_DESC_BITMASK (1<<4) 2312 #define CMD_DESC_MASTER (1<<5) 2313 2314 /* 2315 * The command's unique identification bits and the bitmask to get them. 2316 * This isn't strictly the opcode field as defined in the spec and may 2317 * also include type, subtype, and/or subop fields. 2318 */ 2319 struct { 2320 u32 value; 2321 u32 mask; 2322 } cmd; 2323 2324 /* 2325 * The command's length. The command is either fixed length (i.e. does 2326 * not include a length field) or has a length field mask. The flag 2327 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has 2328 * a length mask. All command entries in a command table must include 2329 * length information. 2330 */ 2331 union { 2332 u32 fixed; 2333 u32 mask; 2334 } length; 2335 2336 /* 2337 * Describes where to find a register address in the command to check 2338 * against the ring's register whitelist. Only valid if flags has the 2339 * CMD_DESC_REGISTER bit set. 2340 * 2341 * A non-zero step value implies that the command may access multiple 2342 * registers in sequence (e.g. LRI), in that case step gives the 2343 * distance in dwords between individual offset fields. 2344 */ 2345 struct { 2346 u32 offset; 2347 u32 mask; 2348 u32 step; 2349 } reg; 2350 2351 #define MAX_CMD_DESC_BITMASKS 3 2352 /* 2353 * Describes command checks where a particular dword is masked and 2354 * compared against an expected value. If the command does not match 2355 * the expected value, the parser rejects it. Only valid if flags has 2356 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero 2357 * are valid. 2358 * 2359 * If the check specifies a non-zero condition_mask then the parser 2360 * only performs the check when the bits specified by condition_mask 2361 * are non-zero. 2362 */ 2363 struct { 2364 u32 offset; 2365 u32 mask; 2366 u32 expected; 2367 u32 condition_offset; 2368 u32 condition_mask; 2369 } bits[MAX_CMD_DESC_BITMASKS]; 2370 }; 2371 2372 /* 2373 * A table of commands requiring special handling by the command parser. 2374 * 2375 * Each engine has an array of tables. Each table consists of an array of 2376 * command descriptors, which must be sorted with command opcodes in 2377 * ascending order. 
2378 */ 2379 struct drm_i915_cmd_table { 2380 const struct drm_i915_cmd_descriptor *table; 2381 int count; 2382 }; 2383 2384 static inline const struct intel_device_info * 2385 intel_info(const struct drm_i915_private *dev_priv) 2386 { 2387 return &dev_priv->info; 2388 } 2389 2390 #define INTEL_INFO(dev_priv) intel_info((dev_priv)) 2391 2392 #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) 2393 #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) 2394 2395 #define REVID_FOREVER 0xff 2396 #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) 2397 2398 #define GEN_FOREVER (0) 2399 /* 2400 * Returns true if Gen is in inclusive range [Start, End]. 2401 * 2402 * Use GEN_FOREVER for unbound start and or end. 2403 */ 2404 #define IS_GEN(dev_priv, s, e) ({ \ 2405 unsigned int __s = (s), __e = (e); \ 2406 BUILD_BUG_ON(!__builtin_constant_p(s)); \ 2407 BUILD_BUG_ON(!__builtin_constant_p(e)); \ 2408 if ((__s) != GEN_FOREVER) \ 2409 __s = (s) - 1; \ 2410 if ((__e) == GEN_FOREVER) \ 2411 __e = BITS_PER_LONG - 1; \ 2412 else \ 2413 __e = (e) - 1; \ 2414 !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \ 2415 }) 2416 2417 /* 2418 * Return true if revision is in range [since,until] inclusive. 2419 * 2420 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. 2421 */ 2422 #define IS_REVID(p, since, until) \ 2423 (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) 2424 2425 #define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577) 2426 #define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562) 2427 #define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x) 2428 #define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572) 2429 #define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g) 2430 #define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592) 2431 #define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772) 2432 #define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm) 2433 #define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater) 2434 #define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline) 2435 #define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42) 2436 #define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x) 2437 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001) 2438 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011) 2439 #define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview) 2440 #define IS_G33(dev_priv) ((dev_priv)->info.is_g33) 2441 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046) 2442 #define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge) 2443 #define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \ 2444 INTEL_DEVID(dev_priv) == 0x0152 || \ 2445 INTEL_DEVID(dev_priv) == 0x015a) 2446 #define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview) 2447 #define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview) 2448 #define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell) 2449 #define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell) 2450 #define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake) 2451 #define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton) 2452 #define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake) 2453 #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) 2454 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 2455 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 2456 #define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \ 2457 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \ 2458 (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \ 2459 
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)) 2460 /* ULX machines are also considered ULT. */ 2461 #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ 2462 (INTEL_DEVID(dev_priv) & 0xf) == 0xe) 2463 #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ 2464 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2465 #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ 2466 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) 2467 #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ 2468 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2469 /* ULX machines are also considered ULT. */ 2470 #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ 2471 INTEL_DEVID(dev_priv) == 0x0A1E) 2472 #define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \ 2473 INTEL_DEVID(dev_priv) == 0x1913 || \ 2474 INTEL_DEVID(dev_priv) == 0x1916 || \ 2475 INTEL_DEVID(dev_priv) == 0x1921 || \ 2476 INTEL_DEVID(dev_priv) == 0x1926) 2477 #define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \ 2478 INTEL_DEVID(dev_priv) == 0x1915 || \ 2479 INTEL_DEVID(dev_priv) == 0x191E) 2480 #define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \ 2481 INTEL_DEVID(dev_priv) == 0x5913 || \ 2482 INTEL_DEVID(dev_priv) == 0x5916 || \ 2483 INTEL_DEVID(dev_priv) == 0x5921 || \ 2484 INTEL_DEVID(dev_priv) == 0x5926) 2485 #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ 2486 INTEL_DEVID(dev_priv) == 0x5915 || \ 2487 INTEL_DEVID(dev_priv) == 0x591E) 2488 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2489 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) 2490 #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2491 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) 2492 2493 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) 2494 2495 #define SKL_REVID_A0 0x0 2496 #define SKL_REVID_B0 0x1 2497 #define SKL_REVID_C0 0x2 2498 #define SKL_REVID_D0 0x3 2499 #define SKL_REVID_E0 0x4 2500 #define SKL_REVID_F0 0x5 2501 #define SKL_REVID_G0 0x6 2502 #define SKL_REVID_H0 0x7 2503 2504 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2505 2506 #define BXT_REVID_A0 0x0 2507 #define BXT_REVID_A1 0x1 2508 #define BXT_REVID_B0 0x3 2509 #define BXT_REVID_C0 0x9 2510 2511 #define IS_BXT_REVID(dev_priv, since, until) \ 2512 (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until)) 2513 2514 #define KBL_REVID_A0 0x0 2515 #define KBL_REVID_B0 0x1 2516 #define KBL_REVID_C0 0x2 2517 #define KBL_REVID_D0 0x3 2518 #define KBL_REVID_E0 0x4 2519 2520 #define IS_KBL_REVID(dev_priv, since, until) \ 2521 (IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until)) 2522 2523 /* 2524 * The genX designation typically refers to the render engine, so render 2525 * capability related checks should use IS_GEN, while display and other checks 2526 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2527 * chips, etc.). 
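 *
 * As an illustration (hypothetical checks, not from this file), a render
 * workaround limited to gen6 through gen8 would be gated with the ranged
 * macro defined above:
 *
 *	if (IS_GEN(dev_priv, 6, 8))
 *		apply_render_workaround(dev_priv);
 *
 * while an open-ended "gen9 and later" render check can use GEN_FOREVER:
 *
 *	if (IS_GEN(dev_priv, 9, GEN_FOREVER))
 *		...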
2528 */ 2529 #define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1))) 2530 #define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2))) 2531 #define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3))) 2532 #define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4))) 2533 #define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5))) 2534 #define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6))) 2535 #define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7))) 2536 #define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8))) 2537 2538 #define ENGINE_MASK(id) BIT(id) 2539 #define RENDER_RING ENGINE_MASK(RCS) 2540 #define BSD_RING ENGINE_MASK(VCS) 2541 #define BLT_RING ENGINE_MASK(BCS) 2542 #define VEBOX_RING ENGINE_MASK(VECS) 2543 #define BSD2_RING ENGINE_MASK(VCS2) 2544 #define ALL_ENGINES (~0) 2545 2546 #define HAS_ENGINE(dev_priv, id) \ 2547 (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id))) 2548 2549 #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) 2550 #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) 2551 #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) 2552 #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) 2553 2554 #define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc) 2555 #define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop) 2556 #define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED)) 2557 #define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \ 2558 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv)) 2559 2560 #define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical) 2561 2562 #define HAS_HW_CONTEXTS(dev_priv) ((dev_priv)->info.has_hw_contexts) 2563 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ 2564 ((dev_priv)->info.has_logical_ring_contexts) 2565 #define USES_PPGTT(dev_priv) (i915.enable_ppgtt) 2566 #define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) 2567 #define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) 2568 2569 #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) 2570 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ 2571 ((dev_priv)->info.overlay_needs_physical) 2572 2573 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 2574 #define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv)) 2575 2576 /* WaRsDisableCoarsePowerGating:skl,bxt */ 2577 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ 2578 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ 2579 IS_SKL_GT3(dev_priv) || \ 2580 IS_SKL_GT4(dev_priv)) 2581 2582 /* 2583 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2584 * even when in MSI mode. This results in spurious interrupt warnings if the 2585 * legacy irq no. is shared with another device. The kernel then disables that 2586 * interrupt source and so prevents the other device from working properly. 2587 */ 2588 #define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5) 2589 #define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq) 2590 2591 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 2592 * rows, which changed the alignment requirements and fence programming. 
2593 */ 2594 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \ 2595 !(IS_I915G(dev_priv) || \ 2596 IS_I915GM(dev_priv))) 2597 #define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv) 2598 #define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug) 2599 2600 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2601 #define HAS_PIPE_CXSR(dev_priv) ((dev_priv)->info.has_pipe_cxsr) 2602 #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) 2603 2604 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2605 2606 #define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst) 2607 2608 #define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi) 2609 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg) 2610 #define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr) 2611 #define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6) 2612 #define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p) 2613 2614 #define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr) 2615 2616 #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) 2617 #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) 2618 2619 /* 2620 * For now, anything with a GuC requires uCode loading, and then supports 2621 * command submission once loaded. But these are logically independent 2622 * properties, so we have separate macros to test them. 2623 */ 2624 #define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc) 2625 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) 2626 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) 2627 2628 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer) 2629 2630 #define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu) 2631 2632 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2633 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2634 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2635 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2636 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2637 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2638 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2639 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2640 #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200 2641 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2642 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2643 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2644 2645 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2646 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2647 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2648 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2649 #define HAS_PCH_LPT_LP(dev_priv) \ 2650 ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2651 #define HAS_PCH_LPT_H(dev_priv) \ 2652 ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) 2653 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2654 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2655 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) 2656 #define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE) 2657 2658 #define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display) 2659 2660 #define HAS_LSPCON(dev_priv) (IS_GEN9(dev_priv)) 2661 2662 /* DPF == dynamic parity feature */ 2663 #define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf) 2664 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? 
\ 2665 2 : HAS_L3_DPF(dev_priv)) 2666 2667 #define GT_FREQUENCY_MULTIPLIER 50 2668 #define GEN9_FREQ_SCALER 3 2669 2670 #define HAS_DECOUPLED_MMIO(dev_priv) (INTEL_INFO(dev_priv)->has_decoupled_mmio) 2671 2672 #include "i915_trace.h" 2673 2674 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) 2675 { 2676 #ifdef CONFIG_INTEL_IOMMU 2677 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2678 return true; 2679 #endif 2680 return false; 2681 } 2682 2683 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2684 extern int i915_resume_switcheroo(struct drm_device *dev); 2685 2686 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2687 int enable_ppgtt); 2688 2689 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value); 2690 2691 /* i915_drv.c */ 2692 void __printf(3, 4) 2693 __i915_printk(struct drm_i915_private *dev_priv, const char *level, 2694 const char *fmt, ...); 2695 2696 #define i915_report_error(dev_priv, fmt, ...) \ 2697 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2698 2699 #ifdef CONFIG_COMPAT 2700 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2701 unsigned long arg); 2702 #else 2703 #define i915_compat_ioctl NULL 2704 #endif 2705 extern const struct dev_pm_ops i915_pm_ops; 2706 2707 extern int i915_driver_load(struct pci_dev *pdev, 2708 const struct pci_device_id *ent); 2709 extern void i915_driver_unload(struct drm_device *dev); 2710 extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); 2711 extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); 2712 extern void i915_reset(struct drm_i915_private *dev_priv); 2713 extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2714 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2715 extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); 2716 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2717 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 2718 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 2719 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 2720 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2721 2722 /* intel_hotplug.c */ 2723 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, 2724 u32 pin_mask, u32 long_mask); 2725 void intel_hpd_init(struct drm_i915_private *dev_priv); 2726 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2727 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2728 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2729 bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2730 void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2731 2732 /* i915_irq.c */ 2733 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) 2734 { 2735 unsigned long delay; 2736 2737 if (unlikely(!i915.enable_hangcheck)) 2738 return; 2739 2740 /* Don't continually defer the hangcheck so that it is always run at 2741 * least once after work has been scheduled on any ring. Otherwise, 2742 * we will ignore a hung ring if a second ring is kept busy. 
2743 */ 2744 2745 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); 2746 queue_delayed_work(system_long_wq, 2747 &dev_priv->gpu_error.hangcheck_work, delay); 2748 } 2749 2750 __printf(3, 4) 2751 void i915_handle_error(struct drm_i915_private *dev_priv, 2752 u32 engine_mask, 2753 const char *fmt, ...); 2754 2755 extern void intel_irq_init(struct drm_i915_private *dev_priv); 2756 int intel_irq_install(struct drm_i915_private *dev_priv); 2757 void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2758 2759 extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); 2760 extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, 2761 bool restore_forcewake); 2762 extern void intel_uncore_init(struct drm_i915_private *dev_priv); 2763 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2764 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 2765 extern void intel_uncore_fini(struct drm_i915_private *dev_priv); 2766 extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, 2767 bool restore); 2768 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 2769 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 2770 enum forcewake_domains domains); 2771 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, 2772 enum forcewake_domains domains); 2773 /* Like above but the caller must manage the uncore.lock itself. 2774 * Must be used with I915_READ_FW and friends. 2775 */ 2776 void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, 2777 enum forcewake_domains domains); 2778 void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, 2779 enum forcewake_domains domains); 2780 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 2781 2782 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 2783 2784 int intel_wait_for_register(struct drm_i915_private *dev_priv, 2785 i915_reg_t reg, 2786 const u32 mask, 2787 const u32 value, 2788 const unsigned long timeout_ms); 2789 int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, 2790 i915_reg_t reg, 2791 const u32 mask, 2792 const u32 value, 2793 const unsigned long timeout_ms); 2794 2795 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 2796 { 2797 return dev_priv->gvt; 2798 } 2799 2800 static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) 2801 { 2802 return dev_priv->vgpu.active; 2803 } 2804 2805 void 2806 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2807 u32 status_mask); 2808 2809 void 2810 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 2811 u32 status_mask); 2812 2813 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2814 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2815 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 2816 uint32_t mask, 2817 uint32_t bits); 2818 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 2819 uint32_t interrupt_mask, 2820 uint32_t enabled_irq_mask); 2821 static inline void 2822 ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2823 { 2824 ilk_update_display_irq(dev_priv, bits, bits); 2825 } 2826 static inline void 2827 ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) 2828 { 2829 ilk_update_display_irq(dev_priv, bits, 0); 2830 } 2831 void 
bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 2832 enum pipe pipe, 2833 uint32_t interrupt_mask, 2834 uint32_t enabled_irq_mask); 2835 static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, 2836 enum pipe pipe, uint32_t bits) 2837 { 2838 bdw_update_pipe_irq(dev_priv, pipe, bits, bits); 2839 } 2840 static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, 2841 enum pipe pipe, uint32_t bits) 2842 { 2843 bdw_update_pipe_irq(dev_priv, pipe, bits, 0); 2844 } 2845 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2846 uint32_t interrupt_mask, 2847 uint32_t enabled_irq_mask); 2848 static inline void 2849 ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2850 { 2851 ibx_display_interrupt_update(dev_priv, bits, bits); 2852 } 2853 static inline void 2854 ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits) 2855 { 2856 ibx_display_interrupt_update(dev_priv, bits, 0); 2857 } 2858 2859 /* i915_gem.c */ 2860 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2861 struct drm_file *file_priv); 2862 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2863 struct drm_file *file_priv); 2864 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 2865 struct drm_file *file_priv); 2866 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 2867 struct drm_file *file_priv); 2868 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2869 struct drm_file *file_priv); 2870 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 2871 struct drm_file *file_priv); 2872 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2873 struct drm_file *file_priv); 2874 int i915_gem_execbuffer(struct drm_device *dev, void *data, 2875 struct drm_file *file_priv); 2876 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2877 struct drm_file *file_priv); 2878 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2879 struct drm_file *file_priv); 2880 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 2881 struct drm_file *file); 2882 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 2883 struct drm_file *file); 2884 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2885 struct drm_file *file_priv); 2886 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2887 struct drm_file *file_priv); 2888 int i915_gem_set_tiling(struct drm_device *dev, void *data, 2889 struct drm_file *file_priv); 2890 int i915_gem_get_tiling(struct drm_device *dev, void *data, 2891 struct drm_file *file_priv); 2892 void i915_gem_init_userptr(struct drm_i915_private *dev_priv); 2893 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 2894 struct drm_file *file); 2895 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 2896 struct drm_file *file_priv); 2897 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2898 struct drm_file *file_priv); 2899 int i915_gem_load_init(struct drm_device *dev); 2900 void i915_gem_load_cleanup(struct drm_device *dev); 2901 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 2902 int i915_gem_freeze(struct drm_i915_private *dev_priv); 2903 int i915_gem_freeze_late(struct drm_i915_private *dev_priv); 2904 2905 void *i915_gem_object_alloc(struct drm_device *dev); 2906 void i915_gem_object_free(struct drm_i915_gem_object *obj); 2907 void i915_gem_object_init(struct drm_i915_gem_object *obj, 2908 const struct drm_i915_gem_object_ops 
*ops); 2909 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, 2910 u64 size); 2911 struct drm_i915_gem_object *i915_gem_object_create_from_data( 2912 struct drm_device *dev, const void *data, size_t size); 2913 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file); 2914 void i915_gem_free_object(struct drm_gem_object *obj); 2915 2916 struct i915_vma * __must_check 2917 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 2918 const struct i915_ggtt_view *view, 2919 u64 size, 2920 u64 alignment, 2921 u64 flags); 2922 2923 int i915_gem_object_unbind(struct drm_i915_gem_object *obj); 2924 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2925 2926 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); 2927 2928 static inline int __sg_page_count(const struct scatterlist *sg) 2929 { 2930 return sg->length >> PAGE_SHIFT; 2931 } 2932 2933 struct scatterlist * 2934 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 2935 unsigned int n, unsigned int *offset); 2936 2937 struct page * 2938 i915_gem_object_get_page(struct drm_i915_gem_object *obj, 2939 unsigned int n); 2940 2941 struct page * 2942 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, 2943 unsigned int n); 2944 2945 dma_addr_t 2946 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, 2947 unsigned long n); 2948 2949 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2950 struct sg_table *pages); 2951 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 2952 2953 static inline int __must_check 2954 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2955 { 2956 might_lock(&obj->mm.lock); 2957 2958 if (atomic_inc_not_zero(&obj->mm.pages_pin_count)) 2959 return 0; 2960 2961 return __i915_gem_object_get_pages(obj); 2962 } 2963 2964 static inline void 2965 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 2966 { 2967 GEM_BUG_ON(!obj->mm.pages); 2968 2969 atomic_inc(&obj->mm.pages_pin_count); 2970 } 2971 2972 static inline bool 2973 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj) 2974 { 2975 return atomic_read(&obj->mm.pages_pin_count); 2976 } 2977 2978 static inline void 2979 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2980 { 2981 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 2982 GEM_BUG_ON(!obj->mm.pages); 2983 2984 atomic_dec(&obj->mm.pages_pin_count); 2985 GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); 2986 } 2987 2988 static inline void 2989 i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 2990 { 2991 __i915_gem_object_unpin_pages(obj); 2992 } 2993 2994 enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */ 2995 I915_MM_NORMAL = 0, 2996 I915_MM_SHRINKER 2997 }; 2998 2999 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 3000 enum i915_mm_subclass subclass); 3001 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj); 3002 3003 enum i915_map_type { 3004 I915_MAP_WB = 0, 3005 I915_MAP_WC, 3006 }; 3007 3008 /** 3009 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 3010 * @obj - the object to map into kernel address space 3011 * @type - the type of mapping, used to select pgprot_t 3012 * 3013 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's 3014 * pages and then returns a contiguous mapping of the backing storage into 3015 * the kernel address space. 
Based on the @type of mapping, the PTE will be 3016 * set to either WriteBack or WriteCombine (via pgprot_t). 3017 * 3018 * The caller is responsible for calling i915_gem_object_unpin_map() when the 3019 * mapping is no longer required. 3020 * 3021 * Returns the pointer through which to access the mapped object, or an 3022 * ERR_PTR() on error. 3023 */ 3024 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 3025 enum i915_map_type type); 3026 3027 /** 3028 * i915_gem_object_unpin_map - releases an earlier mapping 3029 * @obj - the object to unmap 3030 * 3031 * After pinning the object and mapping its pages, once you are finished 3032 * with your access, call i915_gem_object_unpin_map() to release the pin 3033 * upon the mapping. Once the pin count reaches zero, that mapping may be 3034 * removed. 3035 */ 3036 static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) 3037 { 3038 i915_gem_object_unpin_pages(obj); 3039 } 3040 3041 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 3042 unsigned int *needs_clflush); 3043 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 3044 unsigned int *needs_clflush); 3045 #define CLFLUSH_BEFORE 0x1 3046 #define CLFLUSH_AFTER 0x2 3047 #define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER) 3048 3049 static inline void 3050 i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) 3051 { 3052 i915_gem_object_unpin_pages(obj); 3053 } 3054 3055 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3056 void i915_vma_move_to_active(struct i915_vma *vma, 3057 struct drm_i915_gem_request *req, 3058 unsigned int flags); 3059 int i915_gem_dumb_create(struct drm_file *file_priv, 3060 struct drm_device *dev, 3061 struct drm_mode_create_dumb *args); 3062 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3063 uint32_t handle, uint64_t *offset); 3064 int i915_gem_mmap_gtt_version(void); 3065 3066 void i915_gem_track_fb(struct drm_i915_gem_object *old, 3067 struct drm_i915_gem_object *new, 3068 unsigned frontbuffer_bits); 3069 3070 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3071 3072 struct drm_i915_gem_request * 3073 i915_gem_find_active_request(struct intel_engine_cs *engine); 3074 3075 void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3076 3077 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 3078 { 3079 return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags)); 3080 } 3081 3082 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 3083 { 3084 return unlikely(test_bit(I915_WEDGED, &error->flags)); 3085 } 3086 3087 static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error) 3088 { 3089 return i915_reset_in_progress(error) | i915_terminally_wedged(error); 3090 } 3091 3092 static inline u32 i915_reset_count(struct i915_gpu_error *error) 3093 { 3094 return READ_ONCE(error->reset_count); 3095 } 3096 3097 void i915_gem_reset(struct drm_i915_private *dev_priv); 3098 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3099 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3100 int __must_check i915_gem_init(struct drm_device *dev); 3101 int __must_check i915_gem_init_hw(struct drm_device *dev); 3102 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); 3103 void i915_gem_cleanup_engines(struct drm_device *dev); 3104 int __must_check i915_gem_wait_for_idle(struct drm_i915_private 
*dev_priv, 3105 unsigned int flags); 3106 int __must_check i915_gem_suspend(struct drm_device *dev); 3107 void i915_gem_resume(struct drm_device *dev); 3108 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 3109 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 3110 unsigned int flags, 3111 long timeout, 3112 struct intel_rps_client *rps); 3113 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 3114 unsigned int flags, 3115 int priority); 3116 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX 3117 3118 int __must_check 3119 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 3120 bool write); 3121 int __must_check 3122 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 3123 struct i915_vma * __must_check 3124 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3125 u32 alignment, 3126 const struct i915_ggtt_view *view); 3127 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3128 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3129 int align); 3130 int i915_gem_open(struct drm_device *dev, struct drm_file *file); 3131 void i915_gem_release(struct drm_device *dev, struct drm_file *file); 3132 3133 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size, 3134 int tiling_mode); 3135 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size, 3136 int tiling_mode, bool fenced); 3137 3138 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3139 enum i915_cache_level cache_level); 3140 3141 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 3142 struct dma_buf *dma_buf); 3143 3144 struct dma_buf *i915_gem_prime_export(struct drm_device *dev, 3145 struct drm_gem_object *gem_obj, int flags); 3146 3147 struct i915_vma * 3148 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3149 struct i915_address_space *vm, 3150 const struct i915_ggtt_view *view); 3151 3152 struct i915_vma * 3153 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 3154 struct i915_address_space *vm, 3155 const struct i915_ggtt_view *view); 3156 3157 static inline struct i915_hw_ppgtt * 3158 i915_vm_to_ppgtt(struct i915_address_space *vm) 3159 { 3160 return container_of(vm, struct i915_hw_ppgtt, base); 3161 } 3162 3163 static inline struct i915_vma * 3164 i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj, 3165 const struct i915_ggtt_view *view) 3166 { 3167 return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view); 3168 } 3169 3170 /* i915_gem_fence_reg.c */ 3171 int __must_check i915_vma_get_fence(struct i915_vma *vma); 3172 int __must_check i915_vma_put_fence(struct i915_vma *vma); 3173 3174 void i915_gem_restore_fences(struct drm_i915_private *dev_priv); 3175 3176 void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); 3177 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, 3178 struct sg_table *pages); 3179 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, 3180 struct sg_table *pages); 3181 3182 /* i915_gem_context.c */ 3183 int __must_check i915_gem_context_init(struct drm_device *dev); 3184 void i915_gem_context_lost(struct drm_i915_private *dev_priv); 3185 void i915_gem_context_fini(struct drm_device *dev); 3186 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3187 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3188 int i915_switch_context(struct drm_i915_gem_request 
*req); 3189 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 3190 struct i915_vma * 3191 i915_gem_context_pin_legacy(struct i915_gem_context *ctx, 3192 unsigned int flags); 3193 void i915_gem_context_free(struct kref *ctx_ref); 3194 struct drm_i915_gem_object * 3195 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3196 struct i915_gem_context * 3197 i915_gem_context_create_gvt(struct drm_device *dev); 3198 3199 static inline struct i915_gem_context * 3200 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 3201 { 3202 struct i915_gem_context *ctx; 3203 3204 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex); 3205 3206 ctx = idr_find(&file_priv->context_idr, id); 3207 if (!ctx) 3208 return ERR_PTR(-ENOENT); 3209 3210 return ctx; 3211 } 3212 3213 static inline struct i915_gem_context * 3214 i915_gem_context_get(struct i915_gem_context *ctx) 3215 { 3216 kref_get(&ctx->ref); 3217 return ctx; 3218 } 3219 3220 static inline void i915_gem_context_put(struct i915_gem_context *ctx) 3221 { 3222 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 3223 kref_put(&ctx->ref, i915_gem_context_free); 3224 } 3225 3226 static inline struct intel_timeline * 3227 i915_gem_context_lookup_timeline(struct i915_gem_context *ctx, 3228 struct intel_engine_cs *engine) 3229 { 3230 struct i915_address_space *vm; 3231 3232 vm = ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; 3233 return &vm->timeline.engine[engine->id]; 3234 } 3235 3236 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) 3237 { 3238 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3239 } 3240 3241 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 3242 struct drm_file *file); 3243 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 3244 struct drm_file *file); 3245 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 3246 struct drm_file *file_priv); 3247 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3248 struct drm_file *file_priv); 3249 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, 3250 struct drm_file *file); 3251 3252 /* i915_gem_evict.c */ 3253 int __must_check i915_gem_evict_something(struct i915_address_space *vm, 3254 u64 min_size, u64 alignment, 3255 unsigned cache_level, 3256 u64 start, u64 end, 3257 unsigned flags); 3258 int __must_check i915_gem_evict_for_vma(struct i915_vma *target); 3259 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3260 3261 /* belongs in i915_gem_gtt.h */ 3262 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) 3263 { 3264 wmb(); 3265 if (INTEL_GEN(dev_priv) < 6) 3266 intel_gtt_chipset_flush(); 3267 } 3268 3269 /* i915_gem_stolen.c */ 3270 int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, 3271 struct drm_mm_node *node, u64 size, 3272 unsigned alignment); 3273 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, 3274 struct drm_mm_node *node, u64 size, 3275 unsigned alignment, u64 start, 3276 u64 end); 3277 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, 3278 struct drm_mm_node *node); 3279 int i915_gem_init_stolen(struct drm_i915_private *dev_priv); 3280 void i915_gem_cleanup_stolen(struct drm_device *dev); 3281 struct drm_i915_gem_object * 3282 i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 3283 struct drm_i915_gem_object * 3284 i915_gem_object_create_stolen_for_preallocated(struct 
drm_device *dev, 3285 u32 stolen_offset, 3286 u32 gtt_offset, 3287 u32 size); 3288 3289 /* i915_gem_internal.c */ 3290 struct drm_i915_gem_object * 3291 i915_gem_object_create_internal(struct drm_i915_private *dev_priv, 3292 unsigned int size); 3293 3294 /* i915_gem_shrinker.c */ 3295 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3296 unsigned long target, 3297 unsigned flags); 3298 #define I915_SHRINK_PURGEABLE 0x1 3299 #define I915_SHRINK_UNBOUND 0x2 3300 #define I915_SHRINK_BOUND 0x4 3301 #define I915_SHRINK_ACTIVE 0x8 3302 #define I915_SHRINK_VMAPS 0x10 3303 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3304 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3305 void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); 3306 3307 3308 /* i915_gem_tiling.c */ 3309 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3310 { 3311 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 3312 3313 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3314 i915_gem_object_is_tiled(obj); 3315 } 3316 3317 /* i915_debugfs.c */ 3318 #ifdef CONFIG_DEBUG_FS 3319 int i915_debugfs_register(struct drm_i915_private *dev_priv); 3320 void i915_debugfs_unregister(struct drm_i915_private *dev_priv); 3321 int i915_debugfs_connector_add(struct drm_connector *connector); 3322 void intel_display_crc_init(struct drm_i915_private *dev_priv); 3323 #else 3324 static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;} 3325 static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {} 3326 static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3327 { return 0; } 3328 static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} 3329 #endif 3330 3331 /* i915_gpu_error.c */ 3332 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 3333 3334 __printf(2, 3) 3335 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); 3336 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 3337 const struct i915_error_state_file_priv *error); 3338 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 3339 struct drm_i915_private *i915, 3340 size_t count, loff_t pos); 3341 static inline void i915_error_state_buf_release( 3342 struct drm_i915_error_state_buf *eb) 3343 { 3344 kfree(eb->buf); 3345 } 3346 void i915_capture_error_state(struct drm_i915_private *dev_priv, 3347 u32 engine_mask, 3348 const char *error_msg); 3349 void i915_error_state_get(struct drm_device *dev, 3350 struct i915_error_state_file_priv *error_priv); 3351 void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3352 void i915_destroy_error_state(struct drm_device *dev); 3353 3354 #else 3355 3356 static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, 3357 u32 engine_mask, 3358 const char *error_msg) 3359 { 3360 } 3361 3362 static inline void i915_destroy_error_state(struct drm_device *dev) 3363 { 3364 } 3365 3366 #endif 3367 3368 const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3369 3370 /* i915_cmd_parser.c */ 3371 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); 3372 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine); 3373 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); 3374 bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine); 3375 int intel_engine_cmd_parser(struct intel_engine_cs 
*engine, 3376 struct drm_i915_gem_object *batch_obj, 3377 struct drm_i915_gem_object *shadow_batch_obj, 3378 u32 batch_start_offset, 3379 u32 batch_len, 3380 bool is_master); 3381 3382 /* i915_suspend.c */ 3383 extern int i915_save_state(struct drm_device *dev); 3384 extern int i915_restore_state(struct drm_device *dev); 3385 3386 /* i915_sysfs.c */ 3387 void i915_setup_sysfs(struct drm_i915_private *dev_priv); 3388 void i915_teardown_sysfs(struct drm_i915_private *dev_priv); 3389 3390 /* intel_i2c.c */ 3391 extern int intel_setup_gmbus(struct drm_device *dev); 3392 extern void intel_teardown_gmbus(struct drm_device *dev); 3393 extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, 3394 unsigned int pin); 3395 3396 extern struct i2c_adapter * 3397 intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); 3398 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 3399 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 3400 static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 3401 { 3402 return container_of(adapter, struct intel_gmbus, adapter)->force_bit; 3403 } 3404 extern void intel_i2c_reset(struct drm_device *dev); 3405 3406 /* intel_bios.c */ 3407 int intel_bios_init(struct drm_i915_private *dev_priv); 3408 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3409 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3410 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3411 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); 3412 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3413 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3414 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3415 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3416 enum port port); 3417 bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, 3418 enum port port); 3419 3420 3421 /* intel_opregion.c */ 3422 #ifdef CONFIG_ACPI 3423 extern int intel_opregion_setup(struct drm_i915_private *dev_priv); 3424 extern void intel_opregion_register(struct drm_i915_private *dev_priv); 3425 extern void intel_opregion_unregister(struct drm_i915_private *dev_priv); 3426 extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); 3427 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3428 bool enable); 3429 extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, 3430 pci_power_t state); 3431 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); 3432 #else 3433 static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } 3434 static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { } 3435 static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { } 3436 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) 3437 { 3438 } 3439 static inline int 3440 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3441 { 3442 return 0; 3443 } 3444 static inline int 3445 intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) 3446 { 3447 return 0; 3448 } 3449 static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) 3450 { 3451 return -ENODEV; 3452 } 3453 #endif 3454 3455 
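/*
 * Illustrative sketch only (hypothetical caller and device address, not
 * part of this header): the GMBUS helpers declared above hand back a plain
 * struct i2c_adapter pointer, so a caller reading one byte over a GMBUS pin
 * might do something like:
 *
 *	struct i2c_adapter *adapter;
 *	u8 offset = 0, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *
 *	if (!intel_gmbus_is_valid_pin(dev_priv, pin))
 *		return -EINVAL;
 *	adapter = intel_gmbus_get_adapter(dev_priv, pin);
 *	if (i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) != ARRAY_SIZE(msgs))
 *		return -EIO;
 */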
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
				       bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_i915_private *dev_priv,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
					     uint8_t lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     uint8_t lane_lat_optim_mask);
uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
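
/*
 * Illustrative sketch (not part of the driver): a hypothetical
 * read-modify-write through the tracing accessors defined above. The
 * macros expect a local "dev_priv" in scope; EXAMPLE_REG and
 * EXAMPLE_ENABLE_BIT are made-up names standing in for a real
 * i915_reg_t register and bit definition.
 */
#if 0
static inline void i915_example_set_enable_bit(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(EXAMPLE_REG);		/* traced 32-bit MMIO read */
	tmp |= EXAMPLE_ENABLE_BIT;
	I915_WRITE(EXAMPLE_REG, tmp);		/* traced 32-bit MMIO write */
}
#endif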

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}

static inline bool
__i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (__i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial.
		 * The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whoever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourselves.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (__i915_gem_request_completed(req))
			return true;
	}

	return false;
}

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

#define ptr_mask_bits(ptr) ({						\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

#define ptr_unpack_bits(ptr, bits) ({					\
	unsigned long __v = (unsigned long)(ptr);			\
	(bits) = __v & ~PAGE_MASK;					\
	(typeof(ptr))(__v & PAGE_MASK);					\
})

#define ptr_pack_bits(ptr, bits)					\
	((typeof(ptr))((unsigned long)(ptr) | (bits)))

#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})

#endif
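
/*
 * Illustrative sketch (not part of the driver): how the ptr_pack_bits()/
 * ptr_unpack_bits()/ptr_mask_bits() helpers above are meant to be used.
 * They stash small flag values in the unused low bits of a page-aligned
 * pointer and recover both later. The function name and the 0x1 flag are
 * made-up names for illustration only.
 */
#if 0
static void example_ptr_pack_unpack(void)
{
	void *vaddr, *packed, *unpacked;
	unsigned long flags;

	/*
	 * The helpers assume a page-aligned pointer, so the low
	 * PAGE_SHIFT bits are free to carry flags.
	 */
	vaddr = (void *)__get_free_page(GFP_KERNEL);
	if (!vaddr)
		return;

	packed = ptr_pack_bits(vaddr, 0x1);		/* stash a flag bit */
	unpacked = ptr_unpack_bits(packed, flags);	/* flags == 0x1 */
	WARN_ON(unpacked != vaddr);
	WARN_ON(ptr_mask_bits(packed) != vaddr);	/* pointer only */

	free_page((unsigned long)vaddr);
}
#endif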