/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include <linux/pm_qos.h>

#include <drm/ttm/ttm_device.h>

#include "display/intel_display_limits.h"
#include "display/intel_display_core.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "soc/intel_pch.h"

#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
#include "i915_params.h"
#include "i915_perf_types.h"
#include "i915_scheduler.h"
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"

struct drm_i915_clock_gating_funcs;
struct vlv_s0ix_state;
struct intel_pxp;

#define GEM_QUIRK_PIN_SWIZZLED_PAGES	BIT(0)

/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
struct i915_dsm {
	/*
	 * The start and end of DSM which we can optionally use to create GEM
	 * objects backed by stolen memory.
	 *
	 * Note that usable_size tells us exactly how much of this we are
	 * actually allowed to use, given that some portion of it is in fact
	 * reserved for use by hardware functions.
	 */
	struct resource stolen;

	/*
	 * Reserved portion of DSM.
	 */
	struct resource reserved;

	/*
	 * Total size minus reserved ranges.
	 *
	 * DSM is segmented in hardware with different portions offlimits to
	 * certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of DSM is reserved for hardware
	 * functions and similarly removed from the accessible range.
	 */
	resource_size_t usable_size;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/*
	 * Shortcut for the stolen region. This points to either
	 * INTEL_REGION_STOLEN_SMEM for integrated platforms, or
	 * INTEL_REGION_STOLEN_LMEM for discrete, or NULL if the device doesn't
	 * support stolen.
	 */
	struct intel_memory_region *stolen_region;
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/**
	 * List of objects which are purgeable.
	 */
	struct list_head purge_list;

	/**
	 * List of objects which have allocated pages and are shrinkable.
	 */
	struct list_head shrink_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	/**
	 * Count of objects pending destructions. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker *shrinker;

	/* shrinker accounting, also useful for userland debugging */
	u64 shrink_memory;
	u32 shrink_count;
};

struct i915_virtual_gpu {
	struct mutex lock; /* serialises sending of g2v_notify command pkts */
	bool active;
	u32 caps;
	u32 *initial_mmio;
	u8 *initial_cfg_space;
	struct list_head entry;
};

struct i915_selftest_stash {
	atomic_t counter;
	struct ida mock_region_instances;
};

struct drm_i915_private {
	struct drm_device drm;

	struct intel_display display;

	/* FIXME: Device release actions should all be moved to drmm_ */
	bool do_release;

	/* i915 device parameters */
	struct i915_params params;

	const struct intel_device_info *__info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	struct i915_dsm dsm;

	struct intel_uncore uncore;
	struct intel_uncore_mmio_debug mmio_debug;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct {
		struct pci_dev *pdev;
		struct resource mch_res;
		bool mchbar_need_disable;
	} gmch;

	/*
	 * Chaining user engines happens in multiple stages, starting with a
	 * simple lock-less linked list created by intel_engine_add_user(),
	 * which later gets sorted and converted to an intermediate regular
	 * list, just to be converted once again to its final rb tree structure
	 * in intel_engines_driver_register().
	 *
	 * Make sure to use the right iterator helper, depending on if the code
	 * in question runs before or after intel_engines_driver_register() --
	 * for_each_uabi_engine() can only be used afterwards!
	 */
	union {
		struct llist_head uabi_engines_llist;
		struct list_head uabi_engines_list;
		struct rb_root uabi_engines;
	};
	unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];

	/* protects the irq masks */
	spinlock_t irq_lock;
	bool irqs_enabled;

	/* LPT/WPT IOSF sideband protection */
	struct mutex sbi_lock;

	/* VLV/CHV IOSF sideband */
	struct {
		struct mutex lock; /* protect sideband access */
		struct pm_qos_request qos;
	} vlv_iosf_sb;

	/* Sideband mailbox protection */
	struct mutex sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;

	bool preserve_bios_swizzle;

	unsigned int fsb_freq, mem_freq, is_ddr3;

	unsigned int hpll_freq;
	unsigned int czclk_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/**
	 * unordered_wq - internal workqueue for unordered work
	 *
	 * This workqueue should be used for all unordered work
	 * scheduling within i915, which used to be scheduled on the
	 * system_wq before moving to a driver instance due
	 * deprecation of flush_scheduled_work().
	 */
	struct workqueue_struct *unordered_wq;

	/* pm private clock gating functions */
	const struct drm_i915_clock_gating_funcs *clock_gating_funcs;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long gem_quirks;

	struct i915_gem_mm mm;

	struct intel_l3_parity l3_parity;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	struct i915_gpu_error gpu_error;

	u32 suspend_count;
	struct vlv_s0ix_state *vlv_s0ix_state;

	struct dram_info {
		bool wm_lv_0_adjust_needed;
		u8 num_channels;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4,
			INTEL_DRAM_DDR5,
			INTEL_DRAM_LPDDR5,
			INTEL_DRAM_GDDR,
		} type;
		u8 num_qgv_points;
		u8 num_psf_gv_points;
	} dram_info;

	struct intel_runtime_pm runtime_pm;

	struct i915_perf perf;

	struct i915_hwmon *hwmon;

	struct intel_gt *gt[I915_MAX_GT];

	struct kobject *sysfs_gt;

	/* Quick lookup of media GT (current platforms only have one) */
	struct intel_gt *media_gt;

	struct {
		struct i915_gem_contexts {
			spinlock_t lock; /* locks list */
			struct list_head list;
		} contexts;

		/*
		 * We replace the local file with a global mappings as the
		 * backing storage for the mmap is on the device and not
		 * on the struct file, and we do not want to prolong the
		 * lifetime of the local fd. To minimise the number of
		 * anonymous inodes we create, we use a global singleton to
		 * share the global mapping.
		 */
		struct file *mmap_singleton;
	} gem;

	struct intel_pxp *pxp;

	struct i915_pmu pmu;

	/* The TTM device structure. */
	struct ttm_device bdev;

	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	struct drm_device *drm = dev_get_drvdata(kdev);

	return drm ? to_i915(drm) : NULL;
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);

	return drm ? to_i915(drm) : NULL;
}

static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
{
	return i915->gt[0];
}

#define rb_to_uabi_engine(rb) \
	rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
	for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
	     (engine__); \
	     (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define INTEL_INFO(i915)	((i915)->__info)
#define RUNTIME_INFO(i915)	(&(i915)->__runtime)
#define DRIVER_CAPS(i915)	(&(i915)->caps)

#define INTEL_DEVID(i915)	(RUNTIME_INFO(i915)->device_id)

#define IP_VER(ver, rel)	((ver) << 8 | (rel))

#define GRAPHICS_VER(i915)		(RUNTIME_INFO(i915)->graphics.ip.ver)
#define GRAPHICS_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->graphics.ip.ver, \
					       RUNTIME_INFO(i915)->graphics.ip.rel)
#define IS_GRAPHICS_VER(i915, from, until) \
	(GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))

#define MEDIA_VER(i915)			(RUNTIME_INFO(i915)->media.ip.ver)
#define MEDIA_VER_FULL(i915)		IP_VER(RUNTIME_INFO(i915)->media.ip.ver, \
					       RUNTIME_INFO(i915)->media.ip.rel)
#define IS_MEDIA_VER(i915, from, until) \
	(MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))

#define INTEL_REVID(i915)	(to_pci_dev((i915)->drm.dev)->revision)

#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)

#define IS_GRAPHICS_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \
	 INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until))

#define IS_MEDIA_STEP(__i915, since, until) \
	(drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \
	 INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until))

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	return info->platform_mask[pi] & INTEL_SUBPLATFORM_MASK;
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}

#define IS_MOBILE(i915)		(INTEL_INFO(i915)->is_mobile)
#define IS_DGFX(i915)		(INTEL_INFO(i915)->is_dgfx)

#define IS_I830(i915)		IS_PLATFORM(i915, INTEL_I830)
#define IS_I845G(i915)		IS_PLATFORM(i915, INTEL_I845G)
#define IS_I85X(i915)		IS_PLATFORM(i915, INTEL_I85X)
#define IS_I865G(i915)		IS_PLATFORM(i915, INTEL_I865G)
#define IS_I915G(i915)		IS_PLATFORM(i915, INTEL_I915G)
#define IS_I915GM(i915)		IS_PLATFORM(i915, INTEL_I915GM)
#define IS_I945G(i915)		IS_PLATFORM(i915, INTEL_I945G)
#define IS_I945GM(i915)		IS_PLATFORM(i915, INTEL_I945GM)
#define IS_I965G(i915)		IS_PLATFORM(i915, INTEL_I965G)
#define IS_I965GM(i915)		IS_PLATFORM(i915, INTEL_I965GM)
#define IS_G45(i915)		IS_PLATFORM(i915, INTEL_G45)
#define IS_GM45(i915)		IS_PLATFORM(i915, INTEL_GM45)
#define IS_G4X(i915)		(IS_G45(i915) || IS_GM45(i915))
#define IS_PINEVIEW(i915)	IS_PLATFORM(i915, INTEL_PINEVIEW)
#define IS_G33(i915)		IS_PLATFORM(i915, INTEL_G33)
#define IS_IRONLAKE(i915)	IS_PLATFORM(i915, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(i915) \
	(IS_PLATFORM(i915, INTEL_IRONLAKE) && IS_MOBILE(i915))
#define IS_SANDYBRIDGE(i915)	IS_PLATFORM(i915, INTEL_SANDYBRIDGE)
#define IS_IVYBRIDGE(i915)	IS_PLATFORM(i915, INTEL_IVYBRIDGE)
#define IS_VALLEYVIEW(i915)	IS_PLATFORM(i915, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(i915)	IS_PLATFORM(i915, INTEL_CHERRYVIEW)
#define IS_HASWELL(i915)	IS_PLATFORM(i915, INTEL_HASWELL)
#define IS_BROADWELL(i915)	IS_PLATFORM(i915, INTEL_BROADWELL)
#define IS_SKYLAKE(i915)	IS_PLATFORM(i915, INTEL_SKYLAKE)
#define IS_BROXTON(i915)	IS_PLATFORM(i915, INTEL_BROXTON)
#define IS_KABYLAKE(i915)	IS_PLATFORM(i915, INTEL_KABYLAKE)
#define IS_GEMINILAKE(i915)	IS_PLATFORM(i915, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(i915)	IS_PLATFORM(i915, INTEL_COFFEELAKE)
#define IS_COMETLAKE(i915)	IS_PLATFORM(i915, INTEL_COMETLAKE)
#define IS_ICELAKE(i915)	IS_PLATFORM(i915, INTEL_ICELAKE)
#define IS_JASPERLAKE(i915)	IS_PLATFORM(i915, INTEL_JASPERLAKE)
#define IS_ELKHARTLAKE(i915)	IS_PLATFORM(i915, INTEL_ELKHARTLAKE)
#define IS_TIGERLAKE(i915)	IS_PLATFORM(i915, INTEL_TIGERLAKE)
#define IS_ROCKETLAKE(i915)	IS_PLATFORM(i915, INTEL_ROCKETLAKE)
#define IS_DG1(i915)		IS_PLATFORM(i915, INTEL_DG1)
#define IS_ALDERLAKE_S(i915)	IS_PLATFORM(i915, INTEL_ALDERLAKE_S)
#define IS_ALDERLAKE_P(i915)	IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
#define IS_DG2(i915)		IS_PLATFORM(i915, INTEL_DG2)
#define IS_METEORLAKE(i915)	IS_PLATFORM(i915, INTEL_METEORLAKE)
/*
 * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE,
 * so we need to define these even on platforms that the i915 base driver
 * doesn't support. Ensure the parameter is used in the definition to
 * avoid 'unused variable' warnings when compiling the shared display code
 * for i915.
 */
#define IS_LUNARLAKE(i915) (0 && i915)
#define IS_BATTLEMAGE(i915) (0 && i915)
#define IS_PANTHERLAKE(i915) (0 && i915)

#define IS_ARROWLAKE_H(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
#define IS_ARROWLAKE_U(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_U)
#define IS_ARROWLAKE_S(i915) \
	IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_S)
#define IS_DG2_G10(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G11)
#define IS_DG2_G12(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G12)
#define IS_DG2_D(i915) \
	IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_D)
#define IS_RAPTORLAKE_S(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL)
#define IS_ALDERLAKE_P_N(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N)
#define IS_RAPTORLAKE_P(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL)
#define IS_RAPTORLAKE_U(i915) \
	IS_SUBPLATFORM(i915, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU)
#define IS_HASWELL_EARLY_SDV(i915) (IS_HASWELL(i915) && \
				    (INTEL_DEVID(i915) & 0xFF00) == 0x0C00)
#define IS_BROADWELL_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BROADWELL_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_HASWELL_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
/* ULX machines are also considered ULT. */
#define IS_HASWELL_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKYLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKYLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KABYLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KABYLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_COFFEELAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COFFEELAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
#define IS_COMETLAKE_ULT(i915) \
	IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_COMETLAKE_ULX(i915) \
	IS_SUBPLATFORM(i915, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)

#define IS_ICL_WITH_PORT_F(i915) \
	IS_SUBPLATFORM(i915, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_TIGERLAKE_UY(i915) \
	IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY)

#define IS_GEN9_LP(i915)	(IS_BROXTON(i915) || IS_GEMINILAKE(i915))
#define IS_GEN9_BC(i915)	(GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915))

#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)

#define __ENGINE_INSTANCES_MASK(mask, first, count) ({			\
	unsigned int first__ = (first);					\
	unsigned int count__ = (count);					\
	((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__;	\
})

#define ENGINE_INSTANCES_MASK(gt, first, count) \
	__ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count)

#define RCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS)
#define BCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS)
#define VDBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
#define CCS_MASK(gt) \
	ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS)

#define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode)

/*
 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
 * All later gens can run the final buffer from the ppgtt
 */
#define CMDPARSER_USES_GGTT(i915) (GRAPHICS_VER(i915) == 7)

#define HAS_LLC(i915)	(INTEL_INFO(i915)->has_llc)
#define HAS_SNOOP(i915)	(INTEL_INFO(i915)->has_snoop)
#define HAS_EDRAM(i915)	((i915)->edram_size_mb)
#define HAS_SECURE_BATCHES(i915) (GRAPHICS_VER(i915) < 6)
#define HAS_WT(i915)	HAS_EDRAM(i915)

#define HWS_NEEDS_PHYSICAL(i915)	(INTEL_INFO(i915)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(i915) \
	(INTEL_INFO(i915)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(i915) \
	(INTEL_INFO(i915)->has_logical_ring_elsq)

#define HAS_EXECLISTS(i915) HAS_LOGICAL_RING_CONTEXTS(i915)

#define INTEL_PPGTT(i915) (RUNTIME_INFO(i915)->ppgtt_type)
#define HAS_PPGTT(i915) \
	(INTEL_PPGTT(i915) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(i915) \
	(INTEL_PPGTT(i915) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(i915, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~RUNTIME_INFO(i915)->page_sizes) == 0; \
})

#define NEEDS_RC6_CTX_CORRUPTION_WA(i915)	\
	(IS_BROADWELL(i915) || GRAPHICS_VER(i915) == 9)

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(i915) \
	(IS_SKYLAKE(i915) && (INTEL_INFO(i915)->gt == 3 || INTEL_INFO(i915)->gt == 4))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
				     !(IS_I915G(i915) || IS_I915GM(i915)))

#define HAS_RC6(i915)		(INTEL_INFO(i915)->has_rc6)
#define HAS_RC6p(i915)		(INTEL_INFO(i915)->has_rc6p)
#define HAS_RC6pp(i915)		(false) /* HW was never validated */

#define HAS_RPS(i915)	(INTEL_INFO(i915)->has_rps)

#define HAS_PXP(i915) \
	(IS_ENABLED(CONFIG_DRM_I915_PXP) && INTEL_INFO(i915)->has_pxp)

#define HAS_HECI_PXP(i915) \
	(INTEL_INFO(i915)->has_heci_pxp)

#define HAS_HECI_GSCFI(i915) \
	(INTEL_INFO(i915)->has_heci_gscfi)

#define HAS_HECI_GSC(i915) (HAS_HECI_PXP(i915) || HAS_HECI_GSCFI(i915))

#define HAS_RUNTIME_PM(i915) (INTEL_INFO(i915)->has_runtime_pm)
#define HAS_64BIT_RELOC(i915) (INTEL_INFO(i915)->has_64bit_reloc)

#define HAS_OA_BPC_REPORTING(i915) \
	(INTEL_INFO(i915)->has_oa_bpc_reporting)
#define HAS_OA_SLICE_CONTRIB_LIMITS(i915) \
	(INTEL_INFO(i915)->has_oa_slice_contrib_limits)
#define HAS_OAM(i915) \
	(INTEL_INFO(i915)->has_oam)

/*
 * Set this flag, when platform requires 64K GTT page sizes or larger for
 * device local memory access.
 */
#define HAS_64K_PAGES(i915) (INTEL_INFO(i915)->has_64k_pages)

#define HAS_REGION(i915, id) (INTEL_INFO(i915)->memory_regions & BIT(id))
#define HAS_LMEM(i915) HAS_REGION(i915, INTEL_REGION_LMEM_0)

#define HAS_EXTRA_GT_LIST(i915) (INTEL_INFO(i915)->extra_gt_list)

/*
 * Platform has the dedicated compression control state for each lmem surfaces
 * stored in lmem to support the 3D and media compression formats.
 */
#define HAS_FLAT_CCS(i915) (INTEL_INFO(i915)->has_flat_ccs)

#define HAS_GT_UC(i915)	(INTEL_INFO(i915)->has_gt_uc)

#define HAS_POOLED_EU(i915)	(RUNTIME_INFO(i915)->has_pooled_eu)

#define HAS_GLOBAL_MOCS_REGISTERS(i915)	(INTEL_INFO(i915)->has_global_mocs)

#define HAS_GMD_ID(i915)	(INTEL_INFO(i915)->has_gmd_id)

#define HAS_L3_CCS_READ(i915) (INTEL_INFO(i915)->has_l3_ccs_read)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(i915) (INTEL_INFO(i915)->has_l3_dpf)
#define NUM_L3_SLICES(i915) (IS_HASWELL(i915) && INTEL_INFO(i915)->gt == 3 ? \
			     2 : HAS_L3_DPF(i915))

#define HAS_GUC_DEPRIVILEGE(i915) \
	(INTEL_INFO(i915)->has_guc_deprivilege)

#define HAS_GUC_TLB_INVALIDATION(i915)	(INTEL_INFO(i915)->has_guc_tlb_invalidation)

#define HAS_3D_PIPELINE(i915)	(INTEL_INFO(i915)->has_3d_pipeline)

#define HAS_ONE_EU_PER_FUSE_BIT(i915)	(INTEL_INFO(i915)->has_one_eu_per_fuse_bit)

#define HAS_LMEMBAR_SMEM_STOLEN(i915) (!HAS_LMEM(i915) && \
				       GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))

#endif