1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2008-2012 Intel Corporation 4 */ 5 6 #include <linux/errno.h> 7 #include <linux/mutex.h> 8 9 #include <drm/drm_mm.h> 10 #include <drm/intel/i915_drm.h> 11 12 #include "gem/i915_gem_lmem.h" 13 #include "gem/i915_gem_region.h" 14 #include "gt/intel_gt.h" 15 #include "gt/intel_gt_mcr.h" 16 #include "gt/intel_gt_regs.h" 17 #include "gt/intel_region_lmem.h" 18 #include "i915_drv.h" 19 #include "i915_gem_stolen.h" 20 #include "i915_pci.h" 21 #include "i915_reg.h" 22 #include "i915_utils.h" 23 #include "i915_vgpu.h" 24 #include "intel_mchbar_regs.h" 25 #include "intel_pci_config.h" 26 27 /* 28 * The BIOS typically reserves some of the system's memory for the exclusive 29 * use of the integrated graphics. This memory is no longer available for 30 * use by the OS and so the user finds that his system has less memory 31 * available than he put in. We refer to this memory as stolen. 32 * 33 * The BIOS will allocate its framebuffer from the stolen memory. Our 34 * goal is try to reuse that object for our own fbcon which must always 35 * be available for panics. Anything else we can reuse the stolen memory 36 * for is a boon. 37 */ 38 39 int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915, 40 struct drm_mm_node *node, u64 size, 41 unsigned alignment, u64 start, u64 end) 42 { 43 int ret; 44 45 if (!drm_mm_initialized(&i915->mm.stolen)) 46 return -ENODEV; 47 48 /* WaSkipStolenMemoryFirstPage:bdw+ */ 49 if (GRAPHICS_VER(i915) >= 8 && start < 4096) 50 start = 4096; 51 52 mutex_lock(&i915->mm.stolen_lock); 53 ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node, 54 size, alignment, 0, 55 start, end, DRM_MM_INSERT_BEST); 56 mutex_unlock(&i915->mm.stolen_lock); 57 58 return ret; 59 } 60 61 int i915_gem_stolen_insert_node(struct drm_i915_private *i915, 62 struct drm_mm_node *node, u64 size, 63 unsigned alignment) 64 { 65 return i915_gem_stolen_insert_node_in_range(i915, node, 66 size, alignment, 67 I915_GEM_STOLEN_BIAS, 68 U64_MAX); 69 } 70 71 void i915_gem_stolen_remove_node(struct drm_i915_private *i915, 72 struct drm_mm_node *node) 73 { 74 mutex_lock(&i915->mm.stolen_lock); 75 drm_mm_remove_node(node); 76 mutex_unlock(&i915->mm.stolen_lock); 77 } 78 79 static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm) 80 { 81 return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start; 82 } 83 84 static int adjust_stolen(struct drm_i915_private *i915, 85 struct resource *dsm) 86 { 87 struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 88 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 89 90 if (!valid_stolen_size(i915, dsm)) 91 return -EINVAL; 92 93 /* 94 * Make sure we don't clobber the GTT if it's within stolen memory 95 * 96 * TODO: We have yet too encounter the case where the GTT wasn't at the 97 * end of stolen. With that assumption we could simplify this. 98 */ 99 if (GRAPHICS_VER(i915) <= 4 && 100 !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) { 101 struct resource stolen[2] = {*dsm, *dsm}; 102 struct resource ggtt_res; 103 resource_size_t ggtt_start; 104 105 ggtt_start = intel_uncore_read(uncore, PGTBL_CTL); 106 if (GRAPHICS_VER(i915) == 4) 107 ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | 108 (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; 109 else 110 ggtt_start &= PGTBL_ADDRESS_LO_MASK; 111 112 ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4); 113 114 if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end) 115 stolen[0].end = ggtt_res.start; 116 if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end) 117 stolen[1].start = ggtt_res.end; 118 119 /* Pick the larger of the two chunks */ 120 if (resource_size(&stolen[0]) > resource_size(&stolen[1])) 121 *dsm = stolen[0]; 122 else 123 *dsm = stolen[1]; 124 125 if (stolen[0].start != stolen[1].start || 126 stolen[0].end != stolen[1].end) { 127 drm_dbg(&i915->drm, 128 "GTT within stolen memory at %pR\n", 129 &ggtt_res); 130 drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", 131 dsm); 132 } 133 } 134 135 if (!valid_stolen_size(i915, dsm)) 136 return -EINVAL; 137 138 return 0; 139 } 140 141 static int request_smem_stolen(struct drm_i915_private *i915, 142 struct resource *dsm) 143 { 144 struct resource *r; 145 146 /* 147 * With stolen lmem, we don't need to request system memory for the 148 * address range since it's local to the gpu. 149 * 150 * Starting MTL, in IGFX devices the stolen memory is exposed via 151 * LMEMBAR and shall be considered similar to stolen lmem. 152 */ 153 if (HAS_LMEM(i915) || HAS_LMEMBAR_SMEM_STOLEN(i915)) 154 return 0; 155 156 /* 157 * Verify that nothing else uses this physical address. Stolen 158 * memory should be reserved by the BIOS and hidden from the 159 * kernel. So if the region is already marked as busy, something 160 * is seriously wrong. 161 */ 162 r = devm_request_mem_region(i915->drm.dev, dsm->start, 163 resource_size(dsm), 164 "Graphics Stolen Memory"); 165 if (r == NULL) { 166 /* 167 * One more attempt but this time requesting region from 168 * start + 1, as we have seen that this resolves the region 169 * conflict with the PCI Bus. 170 * This is a BIOS w/a: Some BIOS wrap stolen in the root 171 * PCI bus, but have an off-by-one error. Hence retry the 172 * reservation starting from 1 instead of 0. 173 * There's also BIOS with off-by-one on the other end. 174 */ 175 r = devm_request_mem_region(i915->drm.dev, dsm->start + 1, 176 resource_size(dsm) - 2, 177 "Graphics Stolen Memory"); 178 /* 179 * GEN3 firmware likes to smash pci bridges into the stolen 180 * range. Apparently this works. 181 */ 182 if (!r && GRAPHICS_VER(i915) != 3) { 183 drm_err(&i915->drm, 184 "conflict detected with stolen region: %pR\n", 185 dsm); 186 187 return -EBUSY; 188 } 189 } 190 191 return 0; 192 } 193 194 static void i915_gem_cleanup_stolen(struct drm_i915_private *i915) 195 { 196 if (!drm_mm_initialized(&i915->mm.stolen)) 197 return; 198 199 drm_mm_takedown(&i915->mm.stolen); 200 } 201 202 static void g4x_get_stolen_reserved(struct drm_i915_private *i915, 203 struct intel_uncore *uncore, 204 resource_size_t *base, 205 resource_size_t *size) 206 { 207 u32 reg_val = intel_uncore_read(uncore, 208 IS_GM45(i915) ? 209 CTG_STOLEN_RESERVED : 210 ELK_STOLEN_RESERVED); 211 resource_size_t stolen_top = i915->dsm.stolen.end + 1; 212 213 drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", 214 IS_GM45(i915) ? "CTG" : "ELK", reg_val); 215 216 if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) 217 return; 218 219 /* 220 * Whether ILK really reuses the ELK register for this is unclear. 221 * Let's see if we catch anyone with this supposedly enabled on ILK. 222 */ 223 drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5, 224 "ILK stolen reserved found? 0x%08x\n", 225 reg_val); 226 227 if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) 228 return; 229 230 *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; 231 drm_WARN_ON(&i915->drm, 232 (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); 233 234 *size = stolen_top - *base; 235 } 236 237 static void gen6_get_stolen_reserved(struct drm_i915_private *i915, 238 struct intel_uncore *uncore, 239 resource_size_t *base, 240 resource_size_t *size) 241 { 242 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 243 244 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 245 246 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 247 return; 248 249 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 250 251 switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) { 252 case GEN6_STOLEN_RESERVED_1M: 253 *size = 1024 * 1024; 254 break; 255 case GEN6_STOLEN_RESERVED_512K: 256 *size = 512 * 1024; 257 break; 258 case GEN6_STOLEN_RESERVED_256K: 259 *size = 256 * 1024; 260 break; 261 case GEN6_STOLEN_RESERVED_128K: 262 *size = 128 * 1024; 263 break; 264 default: 265 *size = 1024 * 1024; 266 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK); 267 } 268 } 269 270 static void vlv_get_stolen_reserved(struct drm_i915_private *i915, 271 struct intel_uncore *uncore, 272 resource_size_t *base, 273 resource_size_t *size) 274 { 275 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 276 resource_size_t stolen_top = i915->dsm.stolen.end + 1; 277 278 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 279 280 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 281 return; 282 283 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 284 default: 285 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 286 fallthrough; 287 case GEN7_STOLEN_RESERVED_1M: 288 *size = 1024 * 1024; 289 break; 290 } 291 292 /* 293 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the 294 * reserved location as (top - size). 295 */ 296 *base = stolen_top - *size; 297 } 298 299 static void gen7_get_stolen_reserved(struct drm_i915_private *i915, 300 struct intel_uncore *uncore, 301 resource_size_t *base, 302 resource_size_t *size) 303 { 304 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 305 306 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 307 308 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 309 return; 310 311 *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK; 312 313 switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { 314 case GEN7_STOLEN_RESERVED_1M: 315 *size = 1024 * 1024; 316 break; 317 case GEN7_STOLEN_RESERVED_256K: 318 *size = 256 * 1024; 319 break; 320 default: 321 *size = 1024 * 1024; 322 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); 323 } 324 } 325 326 static void chv_get_stolen_reserved(struct drm_i915_private *i915, 327 struct intel_uncore *uncore, 328 resource_size_t *base, 329 resource_size_t *size) 330 { 331 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 332 333 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 334 335 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 336 return; 337 338 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 339 340 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 341 case GEN8_STOLEN_RESERVED_1M: 342 *size = 1024 * 1024; 343 break; 344 case GEN8_STOLEN_RESERVED_2M: 345 *size = 2 * 1024 * 1024; 346 break; 347 case GEN8_STOLEN_RESERVED_4M: 348 *size = 4 * 1024 * 1024; 349 break; 350 case GEN8_STOLEN_RESERVED_8M: 351 *size = 8 * 1024 * 1024; 352 break; 353 default: 354 *size = 8 * 1024 * 1024; 355 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 356 } 357 } 358 359 static void bdw_get_stolen_reserved(struct drm_i915_private *i915, 360 struct intel_uncore *uncore, 361 resource_size_t *base, 362 resource_size_t *size) 363 { 364 u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); 365 resource_size_t stolen_top = i915->dsm.stolen.end + 1; 366 367 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); 368 369 if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) 370 return; 371 372 if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK)) 373 return; 374 375 *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; 376 *size = stolen_top - *base; 377 } 378 379 static void icl_get_stolen_reserved(struct drm_i915_private *i915, 380 struct intel_uncore *uncore, 381 resource_size_t *base, 382 resource_size_t *size) 383 { 384 u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); 385 386 drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); 387 388 /* Wa_14019821291 */ 389 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) { 390 /* 391 * This workaround is primarily implemented by the BIOS. We 392 * just need to figure out whether the BIOS has applied the 393 * workaround (meaning the programmed address falls within 394 * the DSM) and, if so, reserve that part of the DSM to 395 * prevent accidental reuse. The DSM location should be just 396 * below the WOPCM. 397 */ 398 u64 gscpsmi_base = intel_uncore_read64_2x32(uncore, 399 MTL_GSCPSMI_BASEADDR_LSB, 400 MTL_GSCPSMI_BASEADDR_MSB); 401 if (gscpsmi_base >= i915->dsm.stolen.start && 402 gscpsmi_base < i915->dsm.stolen.end) { 403 *base = gscpsmi_base; 404 *size = i915->dsm.stolen.end - gscpsmi_base; 405 return; 406 } 407 } 408 409 switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { 410 case GEN8_STOLEN_RESERVED_1M: 411 *size = 1024 * 1024; 412 break; 413 case GEN8_STOLEN_RESERVED_2M: 414 *size = 2 * 1024 * 1024; 415 break; 416 case GEN8_STOLEN_RESERVED_4M: 417 *size = 4 * 1024 * 1024; 418 break; 419 case GEN8_STOLEN_RESERVED_8M: 420 *size = 8 * 1024 * 1024; 421 break; 422 default: 423 *size = 8 * 1024 * 1024; 424 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); 425 } 426 427 if (HAS_LMEMBAR_SMEM_STOLEN(i915)) 428 /* the base is initialized to stolen top so subtract size to get base */ 429 *base -= *size; 430 else 431 *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; 432 } 433 434 /* 435 * Initialize i915->dsm.reserved to contain the reserved space within the Data 436 * Stolen Memory. This is a range on the top of DSM that is reserved, not to 437 * be used by driver, so must be excluded from the region passed to the 438 * allocator later. In the spec this is also called as WOPCM. 439 * 440 * Our expectation is that the reserved space is at the top of the stolen 441 * region, as it has been the case for every platform, and *never* at the 442 * bottom, so the calculation here can be simplified. 443 */ 444 static int init_reserved_stolen(struct drm_i915_private *i915) 445 { 446 struct intel_uncore *uncore = &i915->uncore; 447 resource_size_t reserved_base, stolen_top; 448 resource_size_t reserved_size; 449 int ret = 0; 450 451 stolen_top = i915->dsm.stolen.end + 1; 452 reserved_base = stolen_top; 453 reserved_size = 0; 454 455 if (GRAPHICS_VER(i915) >= 11) { 456 icl_get_stolen_reserved(i915, uncore, 457 &reserved_base, &reserved_size); 458 } else if (GRAPHICS_VER(i915) >= 8) { 459 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915) || IS_GEMINILAKE(i915)) 460 chv_get_stolen_reserved(i915, uncore, 461 &reserved_base, &reserved_size); 462 else 463 bdw_get_stolen_reserved(i915, uncore, 464 &reserved_base, &reserved_size); 465 } else if (GRAPHICS_VER(i915) >= 7) { 466 if (IS_VALLEYVIEW(i915)) 467 vlv_get_stolen_reserved(i915, uncore, 468 &reserved_base, &reserved_size); 469 else 470 gen7_get_stolen_reserved(i915, uncore, 471 &reserved_base, &reserved_size); 472 } else if (GRAPHICS_VER(i915) >= 6) { 473 gen6_get_stolen_reserved(i915, uncore, 474 &reserved_base, &reserved_size); 475 } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { 476 g4x_get_stolen_reserved(i915, uncore, 477 &reserved_base, &reserved_size); 478 } 479 480 /* No reserved stolen */ 481 if (reserved_base == stolen_top) 482 goto bail_out; 483 484 if (!reserved_base) { 485 drm_err(&i915->drm, 486 "inconsistent reservation %pa + %pa; ignoring\n", 487 &reserved_base, &reserved_size); 488 ret = -EINVAL; 489 goto bail_out; 490 } 491 492 i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size); 493 494 if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) { 495 drm_err(&i915->drm, 496 "Stolen reserved area %pR outside stolen memory %pR\n", 497 &i915->dsm.reserved, &i915->dsm.stolen); 498 ret = -EINVAL; 499 goto bail_out; 500 } 501 502 return 0; 503 504 bail_out: 505 i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0); 506 507 return ret; 508 } 509 510 static int i915_gem_init_stolen(struct intel_memory_region *mem) 511 { 512 struct drm_i915_private *i915 = mem->i915; 513 514 mutex_init(&i915->mm.stolen_lock); 515 516 if (intel_vgpu_active(i915)) { 517 drm_notice(&i915->drm, 518 "%s, disabling use of stolen memory\n", 519 "iGVT-g active"); 520 return -ENOSPC; 521 } 522 523 if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { 524 drm_notice(&i915->drm, 525 "%s, disabling use of stolen memory\n", 526 "DMAR active"); 527 return -ENOSPC; 528 } 529 530 if (adjust_stolen(i915, &mem->region)) 531 return -ENOSPC; 532 533 if (request_smem_stolen(i915, &mem->region)) 534 return -ENOSPC; 535 536 i915->dsm.stolen = mem->region; 537 538 if (init_reserved_stolen(i915)) 539 return -ENOSPC; 540 541 /* Exclude the reserved region from driver use */ 542 mem->region.end = i915->dsm.reserved.start - 1; 543 mem->io = DEFINE_RES_MEM(mem->io.start, 544 min(resource_size(&mem->io), 545 resource_size(&mem->region))); 546 547 i915->dsm.usable_size = resource_size(&mem->region); 548 549 drm_dbg(&i915->drm, 550 "Memory reserved for graphics device: %lluK, usable: %lluK\n", 551 (u64)resource_size(&i915->dsm.stolen) >> 10, 552 (u64)i915->dsm.usable_size >> 10); 553 554 if (i915->dsm.usable_size == 0) 555 return -ENOSPC; 556 557 /* Basic memrange allocator for stolen space. */ 558 drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size); 559 560 /* 561 * Access to stolen lmem beyond certain size for MTL A0 stepping 562 * would crash the machine. Disable stolen lmem for userspace access 563 * by setting usable_size to zero. 564 */ 565 if (IS_METEORLAKE(i915) && INTEL_REVID(i915) == 0x0) 566 i915->dsm.usable_size = 0; 567 568 return 0; 569 } 570 571 static void dbg_poison(struct i915_ggtt *ggtt, 572 dma_addr_t addr, resource_size_t size, 573 u8 x) 574 { 575 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 576 if (!drm_mm_node_allocated(&ggtt->error_capture)) 577 return; 578 579 if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND) 580 return; /* beware stop_machine() inversion */ 581 582 GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 583 584 mutex_lock(&ggtt->error_mutex); 585 while (size) { 586 void __iomem *s; 587 588 ggtt->vm.insert_page(&ggtt->vm, addr, 589 ggtt->error_capture.start, 590 i915_gem_get_pat_index(ggtt->vm.i915, 591 I915_CACHE_NONE), 592 0); 593 mb(); 594 595 s = io_mapping_map_wc(&ggtt->iomap, 596 ggtt->error_capture.start, 597 PAGE_SIZE); 598 memset_io(s, x, PAGE_SIZE); 599 io_mapping_unmap(s); 600 601 addr += PAGE_SIZE; 602 size -= PAGE_SIZE; 603 } 604 mb(); 605 ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE); 606 mutex_unlock(&ggtt->error_mutex); 607 #endif 608 } 609 610 static struct sg_table * 611 i915_pages_create_for_stolen(struct drm_device *dev, 612 resource_size_t offset, resource_size_t size) 613 { 614 struct drm_i915_private *i915 = to_i915(dev); 615 struct sg_table *st; 616 struct scatterlist *sg; 617 618 GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen))); 619 620 /* We hide that we have no struct page backing our stolen object 621 * by wrapping the contiguous physical allocation with a fake 622 * dma mapping in a single scatterlist. 623 */ 624 625 st = kmalloc(sizeof(*st), GFP_KERNEL); 626 if (st == NULL) 627 return ERR_PTR(-ENOMEM); 628 629 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 630 kfree(st); 631 return ERR_PTR(-ENOMEM); 632 } 633 634 sg = st->sgl; 635 sg->offset = 0; 636 sg->length = size; 637 638 sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset; 639 sg_dma_len(sg) = size; 640 641 return st; 642 } 643 644 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 645 { 646 struct drm_i915_private *i915 = to_i915(obj->base.dev); 647 struct sg_table *pages = 648 i915_pages_create_for_stolen(obj->base.dev, 649 obj->stolen->start, 650 obj->stolen->size); 651 if (IS_ERR(pages)) 652 return PTR_ERR(pages); 653 654 dbg_poison(to_gt(i915)->ggtt, 655 sg_dma_address(pages->sgl), 656 sg_dma_len(pages->sgl), 657 POISON_INUSE); 658 659 __i915_gem_object_set_pages(obj, pages); 660 661 return 0; 662 } 663 664 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 665 struct sg_table *pages) 666 { 667 struct drm_i915_private *i915 = to_i915(obj->base.dev); 668 /* Should only be called from i915_gem_object_release_stolen() */ 669 670 dbg_poison(to_gt(i915)->ggtt, 671 sg_dma_address(pages->sgl), 672 sg_dma_len(pages->sgl), 673 POISON_FREE); 674 675 sg_free_table(pages); 676 kfree(pages); 677 } 678 679 static void 680 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 681 { 682 struct drm_i915_private *i915 = to_i915(obj->base.dev); 683 struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); 684 685 GEM_BUG_ON(!stolen); 686 i915_gem_stolen_remove_node(i915, stolen); 687 kfree(stolen); 688 689 i915_gem_object_release_memory_region(obj); 690 } 691 692 static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 693 .name = "i915_gem_object_stolen", 694 .get_pages = i915_gem_object_get_pages_stolen, 695 .put_pages = i915_gem_object_put_pages_stolen, 696 .release = i915_gem_object_release_stolen, 697 }; 698 699 static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 700 struct drm_i915_gem_object *obj, 701 struct drm_mm_node *stolen) 702 { 703 static struct lock_class_key lock_class; 704 unsigned int cache_level; 705 unsigned int flags; 706 int err; 707 708 /* 709 * Stolen objects are always physically contiguous since we just 710 * allocate one big block underneath using the drm_mm range allocator. 711 */ 712 flags = I915_BO_ALLOC_CONTIGUOUS; 713 714 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 715 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags); 716 717 obj->stolen = stolen; 718 obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 719 cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 720 i915_gem_object_set_cache_coherency(obj, cache_level); 721 722 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) 723 return -EBUSY; 724 725 i915_gem_object_init_memory_region(obj, mem); 726 727 err = i915_gem_object_pin_pages(obj); 728 if (err) 729 i915_gem_object_release_memory_region(obj); 730 i915_gem_object_unlock(obj); 731 732 return err; 733 } 734 735 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 736 struct drm_i915_gem_object *obj, 737 resource_size_t offset, 738 resource_size_t size, 739 resource_size_t page_size, 740 unsigned int flags) 741 { 742 struct drm_i915_private *i915 = mem->i915; 743 struct drm_mm_node *stolen; 744 int ret; 745 746 if (!drm_mm_initialized(&i915->mm.stolen)) 747 return -ENODEV; 748 749 if (size == 0) 750 return -EINVAL; 751 752 /* 753 * With discrete devices, where we lack a mappable aperture there is no 754 * possible way to ever access this memory on the CPU side. 755 */ 756 if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !resource_size(&mem->io) && 757 !(flags & I915_BO_ALLOC_GPU_ONLY)) 758 return -ENOSPC; 759 760 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 761 if (!stolen) 762 return -ENOMEM; 763 764 if (offset != I915_BO_INVALID_OFFSET) { 765 drm_dbg(&i915->drm, 766 "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", 767 &offset, &size); 768 769 stolen->start = offset; 770 stolen->size = size; 771 mutex_lock(&i915->mm.stolen_lock); 772 ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); 773 mutex_unlock(&i915->mm.stolen_lock); 774 } else { 775 ret = i915_gem_stolen_insert_node(i915, stolen, size, 776 mem->min_page_size); 777 } 778 if (ret) 779 goto err_free; 780 781 ret = __i915_gem_object_create_stolen(mem, obj, stolen); 782 if (ret) 783 goto err_remove; 784 785 return 0; 786 787 err_remove: 788 i915_gem_stolen_remove_node(i915, stolen); 789 err_free: 790 kfree(stolen); 791 return ret; 792 } 793 794 struct drm_i915_gem_object * 795 i915_gem_object_create_stolen(struct drm_i915_private *i915, 796 resource_size_t size) 797 { 798 return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0); 799 } 800 801 static int init_stolen_smem(struct intel_memory_region *mem) 802 { 803 int err; 804 805 /* 806 * Initialise stolen early so that we may reserve preallocated 807 * objects for the BIOS to KMS transition. 808 */ 809 err = i915_gem_init_stolen(mem); 810 if (err) 811 drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n"); 812 813 return 0; 814 } 815 816 static int release_stolen_smem(struct intel_memory_region *mem) 817 { 818 i915_gem_cleanup_stolen(mem->i915); 819 return 0; 820 } 821 822 static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { 823 .init = init_stolen_smem, 824 .release = release_stolen_smem, 825 .init_object = _i915_gem_object_stolen_init, 826 }; 827 828 static int init_stolen_lmem(struct intel_memory_region *mem) 829 { 830 int err; 831 832 if (GEM_WARN_ON(resource_size(&mem->region) == 0)) 833 return 0; 834 835 err = i915_gem_init_stolen(mem); 836 if (err) { 837 drm_dbg(&mem->i915->drm, "Skip stolen region: failed to setup\n"); 838 return 0; 839 } 840 841 if (resource_size(&mem->io) && 842 !io_mapping_init_wc(&mem->iomap, mem->io.start, resource_size(&mem->io))) 843 goto err_cleanup; 844 845 return 0; 846 847 err_cleanup: 848 i915_gem_cleanup_stolen(mem->i915); 849 return err; 850 } 851 852 static int release_stolen_lmem(struct intel_memory_region *mem) 853 { 854 if (resource_size(&mem->io)) 855 io_mapping_fini(&mem->iomap); 856 i915_gem_cleanup_stolen(mem->i915); 857 return 0; 858 } 859 860 static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { 861 .init = init_stolen_lmem, 862 .release = release_stolen_lmem, 863 .init_object = _i915_gem_object_stolen_init, 864 }; 865 866 static int mtl_get_gms_size(struct intel_uncore *uncore) 867 { 868 u16 ggc, gms; 869 870 ggc = intel_uncore_read16(uncore, GGC); 871 872 /* check GGMS, should be fixed 0x3 (8MB) */ 873 if ((ggc & GGMS_MASK) != GGMS_MASK) 874 return -EIO; 875 876 /* return valid GMS value, -EIO if invalid */ 877 gms = REG_FIELD_GET(GMS_MASK, ggc); 878 switch (gms) { 879 case 0x0 ... 0x04: 880 return gms * 32; 881 case 0xf0 ... 0xfe: 882 return (gms - 0xf0 + 1) * 4; 883 default: 884 MISSING_CASE(gms); 885 return -EIO; 886 } 887 } 888 889 struct intel_memory_region * 890 i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type, 891 u16 instance) 892 { 893 struct intel_uncore *uncore = &i915->uncore; 894 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 895 resource_size_t dsm_size, dsm_base, lmem_size; 896 struct intel_memory_region *mem; 897 resource_size_t io_start, io_size; 898 resource_size_t min_page_size; 899 int ret; 900 901 if (WARN_ON_ONCE(instance)) 902 return ERR_PTR(-ENODEV); 903 904 if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) 905 return ERR_PTR(-ENXIO); 906 907 if (HAS_LMEMBAR_SMEM_STOLEN(i915) || IS_DG1(i915)) { 908 lmem_size = pci_resource_len(pdev, GEN12_LMEM_BAR); 909 } else { 910 resource_size_t lmem_range; 911 912 lmem_range = intel_gt_mcr_read_any(to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF; 913 lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; 914 lmem_size *= SZ_1G; 915 } 916 917 if (HAS_LMEMBAR_SMEM_STOLEN(i915)) { 918 /* 919 * MTL dsm size is in GGC register. 920 * Also MTL uses offset to GSMBASE in ptes, so i915 921 * uses dsm_base = 8MBs to setup stolen region, since 922 * DSMBASE = GSMBASE + 8MB. 923 */ 924 ret = mtl_get_gms_size(uncore); 925 if (ret < 0) { 926 drm_err(&i915->drm, "invalid MTL GGC register setting\n"); 927 return ERR_PTR(ret); 928 } 929 930 dsm_base = SZ_8M; 931 dsm_size = (resource_size_t)(ret * SZ_1M); 932 933 GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M); 934 GEM_BUG_ON((dsm_base + dsm_size) > lmem_size); 935 } else { 936 /* Use DSM base address instead for stolen memory */ 937 dsm_base = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK; 938 if (lmem_size < dsm_base) { 939 drm_dbg(&i915->drm, 940 "Disabling stolen memory support due to OOB placement: lmem_size = %pa vs dsm_base = %pa\n", 941 &lmem_size, &dsm_base); 942 return NULL; 943 } 944 dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M); 945 } 946 947 if (i915_direct_stolen_access(i915)) { 948 drm_dbg(&i915->drm, "Using direct DSM access\n"); 949 io_start = intel_uncore_read64(uncore, GEN6_DSMBASE) & GEN11_BDSM_MASK; 950 io_size = dsm_size; 951 } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { 952 io_start = 0; 953 io_size = 0; 954 } else { 955 io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; 956 io_size = dsm_size; 957 } 958 959 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : 960 I915_GTT_PAGE_SIZE_4K; 961 962 mem = intel_memory_region_create(i915, dsm_base, dsm_size, 963 min_page_size, 964 io_start, io_size, 965 type, instance, 966 &i915_region_stolen_lmem_ops); 967 if (IS_ERR(mem)) 968 return mem; 969 970 intel_memory_region_set_name(mem, "stolen-local"); 971 972 mem->private = true; 973 974 return mem; 975 } 976 977 struct intel_memory_region* 978 i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type, 979 u16 instance) 980 { 981 struct intel_memory_region *mem; 982 983 mem = intel_memory_region_create(i915, 984 intel_graphics_stolen_res.start, 985 resource_size(&intel_graphics_stolen_res), 986 PAGE_SIZE, 0, 0, type, instance, 987 &i915_region_stolen_smem_ops); 988 if (IS_ERR(mem)) 989 return mem; 990 991 intel_memory_region_set_name(mem, "stolen-system"); 992 993 mem->private = true; 994 995 return mem; 996 } 997 998 bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 999 { 1000 return obj->ops == &i915_gem_object_stolen_ops; 1001 } 1002 1003 bool i915_gem_stolen_initialized(const struct drm_i915_private *i915) 1004 { 1005 return drm_mm_initialized(&i915->mm.stolen); 1006 } 1007 1008 u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915) 1009 { 1010 return i915->dsm.stolen.start; 1011 } 1012 1013 u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915) 1014 { 1015 return resource_size(&i915->dsm.stolen); 1016 } 1017 1018 u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915, 1019 const struct drm_mm_node *node) 1020 { 1021 return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node); 1022 } 1023 1024 bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node) 1025 { 1026 return drm_mm_node_allocated(node); 1027 } 1028 1029 u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node) 1030 { 1031 return node->start; 1032 } 1033 1034 u64 i915_gem_stolen_node_size(const struct drm_mm_node *node) 1035 { 1036 return node->size; 1037 } 1038