1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include "xe_bo.h" 7 8 #include <linux/dma-buf.h> 9 #include <linux/nospec.h> 10 11 #include <drm/drm_drv.h> 12 #include <drm/drm_dumb_buffers.h> 13 #include <drm/drm_gem_ttm_helper.h> 14 #include <drm/drm_managed.h> 15 #include <drm/ttm/ttm_backup.h> 16 #include <drm/ttm/ttm_device.h> 17 #include <drm/ttm/ttm_placement.h> 18 #include <drm/ttm/ttm_tt.h> 19 #include <uapi/drm/xe_drm.h> 20 21 #include <kunit/static_stub.h> 22 23 #include <trace/events/gpu_mem.h> 24 25 #include "xe_device.h" 26 #include "xe_dma_buf.h" 27 #include "xe_drm_client.h" 28 #include "xe_ggtt.h" 29 #include "xe_gt.h" 30 #include "xe_map.h" 31 #include "xe_migrate.h" 32 #include "xe_pm.h" 33 #include "xe_preempt_fence.h" 34 #include "xe_pxp.h" 35 #include "xe_res_cursor.h" 36 #include "xe_shrinker.h" 37 #include "xe_sriov_vf_ccs.h" 38 #include "xe_tile.h" 39 #include "xe_trace_bo.h" 40 #include "xe_ttm_stolen_mgr.h" 41 #include "xe_vm.h" 42 #include "xe_vram_types.h" 43 44 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = { 45 [XE_PL_SYSTEM] = "system", 46 [XE_PL_TT] = "gtt", 47 [XE_PL_VRAM0] = "vram0", 48 [XE_PL_VRAM1] = "vram1", 49 [XE_PL_STOLEN] = "stolen" 50 }; 51 52 static const struct ttm_place sys_placement_flags = { 53 .fpfn = 0, 54 .lpfn = 0, 55 .mem_type = XE_PL_SYSTEM, 56 .flags = 0, 57 }; 58 59 static struct ttm_placement sys_placement = { 60 .num_placement = 1, 61 .placement = &sys_placement_flags, 62 }; 63 64 static struct ttm_placement purge_placement; 65 66 static const struct ttm_place tt_placement_flags[] = { 67 { 68 .fpfn = 0, 69 .lpfn = 0, 70 .mem_type = XE_PL_TT, 71 .flags = TTM_PL_FLAG_DESIRED, 72 }, 73 { 74 .fpfn = 0, 75 .lpfn = 0, 76 .mem_type = XE_PL_SYSTEM, 77 .flags = TTM_PL_FLAG_FALLBACK, 78 } 79 }; 80 81 static struct ttm_placement tt_placement = { 82 .num_placement = 2, 83 .placement = tt_placement_flags, 84 }; 85 86 #define for_each_set_bo_vram_flag(bit__, bo_flags__) \ 87 for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \ 88 for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK) 89 90 bool mem_type_is_vram(u32 mem_type) 91 { 92 return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN; 93 } 94 95 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) 96 { 97 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); 98 } 99 100 static bool resource_is_vram(struct ttm_resource *res) 101 { 102 return mem_type_is_vram(res->mem_type); 103 } 104 105 bool xe_bo_is_vram(struct xe_bo *bo) 106 { 107 return resource_is_vram(bo->ttm.resource) || 108 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); 109 } 110 111 bool xe_bo_is_stolen(struct xe_bo *bo) 112 { 113 return bo->ttm.resource->mem_type == XE_PL_STOLEN; 114 } 115 116 /** 117 * xe_bo_has_single_placement - check if BO is placed only in one memory location 118 * @bo: The BO 119 * 120 * This function checks whether a given BO is placed in only one memory location. 121 * 122 * Returns: true if the BO is placed in a single memory location, false otherwise. 123 * 124 */ 125 bool xe_bo_has_single_placement(struct xe_bo *bo) 126 { 127 return bo->placement.num_placement == 1; 128 } 129 130 /** 131 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR 132 * @bo: The BO 133 * 134 * The stolen memory is accessed through the PCI BAR for both DGFX and some 135 * integrated platforms that have a dedicated bit in the PTE for devmem (DM). 
136 * 137 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise. 138 */ 139 bool xe_bo_is_stolen_devmem(struct xe_bo *bo) 140 { 141 return xe_bo_is_stolen(bo) && 142 GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270; 143 } 144 145 /** 146 * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND 147 * @bo: The BO 148 * 149 * Check if a given bo is bound through VM_BIND. This requires the 150 * reservation lock for the BO to be held. 151 * 152 * Returns: boolean 153 */ 154 bool xe_bo_is_vm_bound(struct xe_bo *bo) 155 { 156 xe_bo_assert_held(bo); 157 158 return !list_empty(&bo->ttm.base.gpuva.list); 159 } 160 161 static bool xe_bo_is_user(struct xe_bo *bo) 162 { 163 return bo->flags & XE_BO_FLAG_USER; 164 } 165 166 static struct xe_migrate * 167 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) 168 { 169 struct xe_tile *tile; 170 171 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); 172 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)]; 173 return tile->migrate; 174 } 175 176 static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res) 177 { 178 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); 179 struct ttm_resource_manager *mgr; 180 struct xe_ttm_vram_mgr *vram_mgr; 181 182 xe_assert(xe, resource_is_vram(res)); 183 mgr = ttm_manager_type(&xe->ttm, res->mem_type); 184 vram_mgr = to_xe_ttm_vram_mgr(mgr); 185 186 return container_of(vram_mgr, struct xe_vram_region, ttm); 187 } 188 189 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, 190 u32 bo_flags, u32 *c) 191 { 192 if (bo_flags & XE_BO_FLAG_SYSTEM) { 193 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 194 195 bo->placements[*c] = (struct ttm_place) { 196 .mem_type = XE_PL_TT, 197 .flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ? 198 TTM_PL_FLAG_FALLBACK : 0, 199 }; 200 *c += 1; 201 } 202 } 203 204 static bool force_contiguous(u32 bo_flags) 205 { 206 if (bo_flags & XE_BO_FLAG_STOLEN) 207 return true; /* users expect this */ 208 else if (bo_flags & XE_BO_FLAG_PINNED && 209 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) 210 return true; /* needs vmap */ 211 else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR) 212 return true; 213 214 /* 215 * For eviction / restore on suspend / resume objects pinned in VRAM 216 * must be contiguous, also only contiguous BOs support xe_bo_vmap. 
217 */ 218 return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS && 219 bo_flags & XE_BO_FLAG_PINNED; 220 } 221 222 static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag) 223 { 224 xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK); 225 xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0); 226 227 return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1; 228 } 229 230 static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag, 231 enum ttm_bo_type type) 232 { 233 u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag); 234 235 xe_assert(xe, tile_id < xe->info.tile_count); 236 237 if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM)) 238 return xe->tiles[tile_id].mem.kernel_vram->placement; 239 else 240 return xe->tiles[tile_id].mem.vram->placement; 241 } 242 243 static void add_vram(struct xe_device *xe, struct xe_bo *bo, 244 struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c) 245 { 246 struct ttm_place place = { .mem_type = mem_type }; 247 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type); 248 struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr); 249 250 struct xe_vram_region *vram; 251 u64 io_size; 252 253 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 254 255 vram = container_of(vram_mgr, struct xe_vram_region, ttm); 256 xe_assert(xe, vram && vram->usable_size); 257 io_size = vram->io_size; 258 259 if (force_contiguous(bo_flags)) 260 place.flags |= TTM_PL_FLAG_CONTIGUOUS; 261 262 if (io_size < vram->usable_size) { 263 if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) { 264 place.fpfn = 0; 265 place.lpfn = io_size >> PAGE_SHIFT; 266 } else { 267 place.flags |= TTM_PL_FLAG_TOPDOWN; 268 } 269 } 270 places[*c] = place; 271 *c += 1; 272 } 273 274 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, 275 u32 bo_flags, enum ttm_bo_type type, u32 *c) 276 { 277 u32 vram_flag; 278 279 for_each_set_bo_vram_flag(vram_flag, bo_flags) { 280 u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type); 281 282 add_vram(xe, bo, bo->placements, bo_flags, pl, c); 283 } 284 } 285 286 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, 287 u32 bo_flags, u32 *c) 288 { 289 if (bo_flags & XE_BO_FLAG_STOLEN) { 290 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 291 292 bo->placements[*c] = (struct ttm_place) { 293 .mem_type = XE_PL_STOLEN, 294 .flags = force_contiguous(bo_flags) ? 
295 TTM_PL_FLAG_CONTIGUOUS : 0, 296 }; 297 *c += 1; 298 } 299 } 300 301 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, 302 u32 bo_flags, enum ttm_bo_type type) 303 { 304 u32 c = 0; 305 306 try_add_vram(xe, bo, bo_flags, type, &c); 307 try_add_system(xe, bo, bo_flags, &c); 308 try_add_stolen(xe, bo, bo_flags, &c); 309 310 if (!c) 311 return -EINVAL; 312 313 bo->placement = (struct ttm_placement) { 314 .num_placement = c, 315 .placement = bo->placements, 316 }; 317 318 return 0; 319 } 320 321 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, 322 u32 bo_flags, enum ttm_bo_type type) 323 { 324 xe_bo_assert_held(bo); 325 return __xe_bo_placement_for_flags(xe, bo, bo_flags, type); 326 } 327 328 static void xe_evict_flags(struct ttm_buffer_object *tbo, 329 struct ttm_placement *placement) 330 { 331 struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm); 332 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); 333 struct xe_bo *bo; 334 335 if (!xe_bo_is_xe_bo(tbo)) { 336 /* Don't handle scatter gather BOs */ 337 if (tbo->type == ttm_bo_type_sg) { 338 placement->num_placement = 0; 339 return; 340 } 341 342 *placement = device_unplugged ? purge_placement : sys_placement; 343 return; 344 } 345 346 bo = ttm_to_xe_bo(tbo); 347 if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) { 348 *placement = sys_placement; 349 return; 350 } 351 352 if (device_unplugged && !tbo->base.dma_buf) { 353 *placement = purge_placement; 354 return; 355 } 356 357 /* 358 * For xe, sg bos that are evicted to system just triggers a 359 * rebind of the sg list upon subsequent validation to XE_PL_TT. 360 */ 361 switch (tbo->resource->mem_type) { 362 case XE_PL_VRAM0: 363 case XE_PL_VRAM1: 364 case XE_PL_STOLEN: 365 *placement = tt_placement; 366 break; 367 case XE_PL_TT: 368 default: 369 *placement = sys_placement; 370 break; 371 } 372 } 373 374 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */ 375 struct xe_ttm_tt { 376 struct ttm_tt ttm; 377 struct sg_table sgt; 378 struct sg_table *sg; 379 /** @purgeable: Whether the content of the pages of @ttm is purgeable. 
*/ 380 bool purgeable; 381 }; 382 383 static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt) 384 { 385 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 386 unsigned long num_pages = tt->num_pages; 387 int ret; 388 389 XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 390 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)); 391 392 if (xe_tt->sg) 393 return 0; 394 395 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, 396 num_pages, 0, 397 (u64)num_pages << PAGE_SHIFT, 398 xe_sg_segment_size(xe->drm.dev), 399 GFP_KERNEL); 400 if (ret) 401 return ret; 402 403 xe_tt->sg = &xe_tt->sgt; 404 ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL, 405 DMA_ATTR_SKIP_CPU_SYNC); 406 if (ret) { 407 sg_free_table(xe_tt->sg); 408 xe_tt->sg = NULL; 409 return ret; 410 } 411 412 return 0; 413 } 414 415 static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt) 416 { 417 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 418 419 if (xe_tt->sg) { 420 dma_unmap_sgtable(xe->drm.dev, xe_tt->sg, 421 DMA_BIDIRECTIONAL, 0); 422 sg_free_table(xe_tt->sg); 423 xe_tt->sg = NULL; 424 } 425 } 426 427 struct sg_table *xe_bo_sg(struct xe_bo *bo) 428 { 429 struct ttm_tt *tt = bo->ttm.ttm; 430 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 431 432 return xe_tt->sg; 433 } 434 435 /* 436 * Account ttm pages against the device shrinker's shrinkable and 437 * purgeable counts. 438 */ 439 static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt) 440 { 441 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 442 443 if (xe_tt->purgeable) 444 xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages); 445 else 446 xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0); 447 } 448 449 static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt) 450 { 451 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 452 453 if (xe_tt->purgeable) 454 xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages); 455 else 456 xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0); 457 } 458 459 static void update_global_total_pages(struct ttm_device *ttm_dev, 460 long num_pages) 461 { 462 #if IS_ENABLED(CONFIG_TRACE_GPU_MEM) 463 struct xe_device *xe = ttm_to_xe_device(ttm_dev); 464 u64 global_total_pages = 465 atomic64_add_return(num_pages, &xe->global_total_pages); 466 467 trace_gpu_mem_total(xe->drm.primary->index, 0, 468 global_total_pages << PAGE_SHIFT); 469 #endif 470 } 471 472 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, 473 u32 page_flags) 474 { 475 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 476 struct xe_device *xe = xe_bo_device(bo); 477 struct xe_ttm_tt *xe_tt; 478 struct ttm_tt *tt; 479 unsigned long extra_pages; 480 enum ttm_caching caching = ttm_cached; 481 int err; 482 483 xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL); 484 if (!xe_tt) 485 return NULL; 486 487 tt = &xe_tt->ttm; 488 489 extra_pages = 0; 490 if (xe_bo_needs_ccs_pages(bo)) 491 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)), 492 PAGE_SIZE); 493 494 /* 495 * DGFX system memory is always WB / ttm_cached, since 496 * other caching modes are only supported on x86. DGFX 497 * GPU system memory accesses are always coherent with the 498 * CPU. 
499 */ 500 if (!IS_DGFX(xe)) { 501 switch (bo->cpu_caching) { 502 case DRM_XE_GEM_CPU_CACHING_WC: 503 caching = ttm_write_combined; 504 break; 505 default: 506 caching = ttm_cached; 507 break; 508 } 509 510 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); 511 512 /* 513 * Display scanout is always non-coherent with the CPU cache. 514 * 515 * For Xe_LPG and beyond, PPGTT PTE lookups are also 516 * non-coherent and require a CPU:WC mapping. 517 */ 518 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || 519 (xe->info.graphics_verx100 >= 1270 && 520 bo->flags & XE_BO_FLAG_PAGETABLE)) 521 caching = ttm_write_combined; 522 } 523 524 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { 525 /* 526 * Valid only for internally-created buffers only, for 527 * which cpu_caching is never initialized. 528 */ 529 xe_assert(xe, bo->cpu_caching == 0); 530 caching = ttm_uncached; 531 } 532 533 if (ttm_bo->type != ttm_bo_type_sg) 534 page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE; 535 536 err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages); 537 if (err) { 538 kfree(xe_tt); 539 return NULL; 540 } 541 542 if (ttm_bo->type != ttm_bo_type_sg) { 543 err = ttm_tt_setup_backup(tt); 544 if (err) { 545 ttm_tt_fini(tt); 546 kfree(xe_tt); 547 return NULL; 548 } 549 } 550 551 return tt; 552 } 553 554 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, 555 struct ttm_operation_ctx *ctx) 556 { 557 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 558 int err; 559 560 /* 561 * dma-bufs are not populated with pages, and the dma- 562 * addresses are set up when moved to XE_PL_TT. 563 */ 564 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 565 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) 566 return 0; 567 568 if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) { 569 err = ttm_tt_restore(ttm_dev, tt, ctx); 570 } else { 571 ttm_tt_clear_backed_up(tt); 572 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); 573 } 574 if (err) 575 return err; 576 577 xe_tt->purgeable = false; 578 xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt); 579 update_global_total_pages(ttm_dev, tt->num_pages); 580 581 return 0; 582 } 583 584 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) 585 { 586 struct xe_device *xe = ttm_to_xe_device(ttm_dev); 587 588 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) && 589 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) 590 return; 591 592 xe_tt_unmap_sg(xe, tt); 593 594 ttm_pool_free(&ttm_dev->pool, tt); 595 xe_ttm_tt_account_subtract(xe, tt); 596 update_global_total_pages(ttm_dev, -(long)tt->num_pages); 597 } 598 599 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) 600 { 601 ttm_tt_fini(tt); 602 kfree(tt); 603 } 604 605 static bool xe_ttm_resource_visible(struct ttm_resource *mem) 606 { 607 struct xe_ttm_vram_mgr_resource *vres = 608 to_xe_ttm_vram_mgr_resource(mem); 609 610 return vres->used_visible_size == mem->size; 611 } 612 613 /** 614 * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM. 615 * @bo: The BO 616 * 617 * This function checks whether a given BO resides entirely in memory visible from the CPU 618 * 619 * Returns: true if the BO is entirely visible, false otherwise. 
620 * 621 */ 622 bool xe_bo_is_visible_vram(struct xe_bo *bo) 623 { 624 if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo))) 625 return false; 626 627 return xe_ttm_resource_visible(bo->ttm.resource); 628 } 629 630 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev, 631 struct ttm_resource *mem) 632 { 633 struct xe_device *xe = ttm_to_xe_device(bdev); 634 635 switch (mem->mem_type) { 636 case XE_PL_SYSTEM: 637 case XE_PL_TT: 638 return 0; 639 case XE_PL_VRAM0: 640 case XE_PL_VRAM1: { 641 struct xe_vram_region *vram = res_to_mem_region(mem); 642 643 if (!xe_ttm_resource_visible(mem)) 644 return -EINVAL; 645 646 mem->bus.offset = mem->start << PAGE_SHIFT; 647 648 if (vram->mapping && 649 mem->placement & TTM_PL_FLAG_CONTIGUOUS) 650 mem->bus.addr = (u8 __force *)vram->mapping + 651 mem->bus.offset; 652 653 mem->bus.offset += vram->io_start; 654 mem->bus.is_iomem = true; 655 656 #if !IS_ENABLED(CONFIG_X86) 657 mem->bus.caching = ttm_write_combined; 658 #endif 659 return 0; 660 } case XE_PL_STOLEN: 661 return xe_ttm_stolen_io_mem_reserve(xe, mem); 662 default: 663 return -EINVAL; 664 } 665 } 666 667 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, 668 const struct ttm_operation_ctx *ctx) 669 { 670 struct dma_resv_iter cursor; 671 struct dma_fence *fence; 672 struct drm_gem_object *obj = &bo->ttm.base; 673 struct drm_gpuvm_bo *vm_bo; 674 bool idle = false; 675 int ret = 0; 676 677 dma_resv_assert_held(bo->ttm.base.resv); 678 679 if (!list_empty(&bo->ttm.base.gpuva.list)) { 680 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, 681 DMA_RESV_USAGE_BOOKKEEP); 682 dma_resv_for_each_fence_unlocked(&cursor, fence) 683 dma_fence_enable_sw_signaling(fence); 684 dma_resv_iter_end(&cursor); 685 } 686 687 drm_gem_for_each_gpuvm_bo(vm_bo, obj) { 688 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); 689 struct drm_gpuva *gpuva; 690 691 if (!xe_vm_in_fault_mode(vm)) { 692 drm_gpuvm_bo_evict(vm_bo, true); 693 continue; 694 } 695 696 if (!idle) { 697 long timeout; 698 699 if (ctx->no_wait_gpu && 700 !dma_resv_test_signaled(bo->ttm.base.resv, 701 DMA_RESV_USAGE_BOOKKEEP)) 702 return -EBUSY; 703 704 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, 705 DMA_RESV_USAGE_BOOKKEEP, 706 ctx->interruptible, 707 MAX_SCHEDULE_TIMEOUT); 708 if (!timeout) 709 return -ETIME; 710 if (timeout < 0) 711 return timeout; 712 713 idle = true; 714 } 715 716 drm_gpuvm_bo_for_each_va(gpuva, vm_bo) { 717 struct xe_vma *vma = gpuva_to_vma(gpuva); 718 719 trace_xe_vma_evict(vma); 720 ret = xe_vm_invalidate_vma(vma); 721 if (XE_WARN_ON(ret)) 722 return ret; 723 } 724 } 725 726 return ret; 727 } 728 729 /* 730 * The dma-buf map_attachment() / unmap_attachment() is hooked up here. 731 * Note that unmapping the attachment is deferred to the next 732 * map_attachment time, or to bo destroy (after idling) whichever comes first. 733 * This is to avoid syncing before unmap_attachment(), assuming that the 734 * caller relies on idling the reservation object before moving the 735 * backing store out. Should that assumption not hold, then we will be able 736 * to unconditionally call unmap_attachment() when moving out to system. 
737 */ 738 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo, 739 struct ttm_resource *new_res) 740 { 741 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; 742 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, 743 ttm); 744 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 745 bool device_unplugged = drm_dev_is_unplugged(&xe->drm); 746 struct sg_table *sg; 747 748 xe_assert(xe, attach); 749 xe_assert(xe, ttm_bo->ttm); 750 751 if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM && 752 ttm_bo->sg) { 753 dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, 754 false, MAX_SCHEDULE_TIMEOUT); 755 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); 756 ttm_bo->sg = NULL; 757 } 758 759 if (new_res->mem_type == XE_PL_SYSTEM) 760 goto out; 761 762 if (ttm_bo->sg) { 763 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); 764 ttm_bo->sg = NULL; 765 } 766 767 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 768 if (IS_ERR(sg)) 769 return PTR_ERR(sg); 770 771 ttm_bo->sg = sg; 772 xe_tt->sg = sg; 773 774 out: 775 ttm_bo_move_null(ttm_bo, new_res); 776 777 return 0; 778 } 779 780 /** 781 * xe_bo_move_notify - Notify subsystems of a pending move 782 * @bo: The buffer object 783 * @ctx: The struct ttm_operation_ctx controlling locking and waits. 784 * 785 * This function notifies subsystems of an upcoming buffer move. 786 * Upon receiving such a notification, subsystems should schedule 787 * halting access to the underlying pages and optionally add a fence 788 * to the buffer object's dma_resv object, that signals when access is 789 * stopped. The caller will wait on all dma_resv fences before 790 * starting the move. 791 * 792 * A subsystem may commence access to the object after obtaining 793 * bindings to the new backing memory under the object lock. 794 * 795 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode, 796 * negative error code on error. 797 */ 798 static int xe_bo_move_notify(struct xe_bo *bo, 799 const struct ttm_operation_ctx *ctx) 800 { 801 struct ttm_buffer_object *ttm_bo = &bo->ttm; 802 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 803 struct ttm_resource *old_mem = ttm_bo->resource; 804 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; 805 int ret; 806 807 /* 808 * If this starts to call into many components, consider 809 * using a notification chain here. 810 */ 811 812 if (xe_bo_is_pinned(bo)) 813 return -EINVAL; 814 815 xe_bo_vunmap(bo); 816 ret = xe_bo_trigger_rebind(xe, bo, ctx); 817 if (ret) 818 return ret; 819 820 /* Don't call move_notify() for imported dma-bufs. */ 821 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) 822 dma_buf_move_notify(ttm_bo->base.dma_buf); 823 824 /* 825 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual), 826 * so if we moved from VRAM make sure to unlink this from the userfault 827 * tracking. 
828 */ 829 if (mem_type_is_vram(old_mem_type)) { 830 mutex_lock(&xe->mem_access.vram_userfault.lock); 831 if (!list_empty(&bo->vram_userfault_link)) 832 list_del_init(&bo->vram_userfault_link); 833 mutex_unlock(&xe->mem_access.vram_userfault.lock); 834 } 835 836 return 0; 837 } 838 839 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, 840 struct ttm_operation_ctx *ctx, 841 struct ttm_resource *new_mem, 842 struct ttm_place *hop) 843 { 844 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 845 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 846 struct ttm_resource *old_mem = ttm_bo->resource; 847 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; 848 struct ttm_tt *ttm = ttm_bo->ttm; 849 struct xe_migrate *migrate = NULL; 850 struct dma_fence *fence; 851 bool move_lacks_source; 852 bool tt_has_data; 853 bool needs_clear; 854 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && 855 ttm && ttm_tt_is_populated(ttm)) ? true : false; 856 int ret = 0; 857 858 /* Bo creation path, moving to system or TT. */ 859 if ((!old_mem && ttm) && !handle_system_ccs) { 860 if (new_mem->mem_type == XE_PL_TT) 861 ret = xe_tt_map_sg(xe, ttm); 862 if (!ret) 863 ttm_bo_move_null(ttm_bo, new_mem); 864 goto out; 865 } 866 867 if (ttm_bo->type == ttm_bo_type_sg) { 868 if (new_mem->mem_type == XE_PL_SYSTEM) 869 ret = xe_bo_move_notify(bo, ctx); 870 if (!ret) 871 ret = xe_bo_move_dmabuf(ttm_bo, new_mem); 872 return ret; 873 } 874 875 tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm)); 876 877 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : 878 (!mem_type_is_vram(old_mem_type) && !tt_has_data)); 879 880 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || 881 (!ttm && ttm_bo->type == ttm_bo_type_device); 882 883 if (new_mem->mem_type == XE_PL_TT) { 884 ret = xe_tt_map_sg(xe, ttm); 885 if (ret) 886 goto out; 887 } 888 889 if ((move_lacks_source && !needs_clear)) { 890 ttm_bo_move_null(ttm_bo, new_mem); 891 goto out; 892 } 893 894 if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) && 895 new_mem->mem_type == XE_PL_SYSTEM) { 896 ret = xe_svm_bo_evict(bo); 897 if (!ret) { 898 drm_dbg(&xe->drm, "Evict system allocator BO success\n"); 899 ttm_bo_move_null(ttm_bo, new_mem); 900 } else { 901 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n", 902 ERR_PTR(ret)); 903 } 904 905 goto out; 906 } 907 908 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { 909 ttm_bo_move_null(ttm_bo, new_mem); 910 goto out; 911 } 912 913 /* 914 * Failed multi-hop where the old_mem is still marked as 915 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move. 
916 */ 917 if (old_mem_type == XE_PL_TT && 918 new_mem->mem_type == XE_PL_TT) { 919 ttm_bo_move_null(ttm_bo, new_mem); 920 goto out; 921 } 922 923 if (!move_lacks_source && !xe_bo_is_pinned(bo)) { 924 ret = xe_bo_move_notify(bo, ctx); 925 if (ret) 926 goto out; 927 } 928 929 if (old_mem_type == XE_PL_TT && 930 new_mem->mem_type == XE_PL_SYSTEM) { 931 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, 932 DMA_RESV_USAGE_BOOKKEEP, 933 false, 934 MAX_SCHEDULE_TIMEOUT); 935 if (timeout < 0) { 936 ret = timeout; 937 goto out; 938 } 939 940 if (!handle_system_ccs) { 941 ttm_bo_move_null(ttm_bo, new_mem); 942 goto out; 943 } 944 } 945 946 if (!move_lacks_source && 947 ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) || 948 (mem_type_is_vram(old_mem_type) && 949 new_mem->mem_type == XE_PL_SYSTEM))) { 950 hop->fpfn = 0; 951 hop->lpfn = 0; 952 hop->mem_type = XE_PL_TT; 953 hop->flags = TTM_PL_FLAG_TEMPORARY; 954 ret = -EMULTIHOP; 955 goto out; 956 } 957 958 if (bo->tile) 959 migrate = bo->tile->migrate; 960 else if (resource_is_vram(new_mem)) 961 migrate = mem_type_to_migrate(xe, new_mem->mem_type); 962 else if (mem_type_is_vram(old_mem_type)) 963 migrate = mem_type_to_migrate(xe, old_mem_type); 964 else 965 migrate = xe->tiles[0].migrate; 966 967 xe_assert(xe, migrate); 968 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source); 969 if (xe_rpm_reclaim_safe(xe)) { 970 /* 971 * We might be called through swapout in the validation path of 972 * another TTM device, so acquire rpm here. 973 */ 974 xe_pm_runtime_get(xe); 975 } else { 976 drm_WARN_ON(&xe->drm, handle_system_ccs); 977 xe_pm_runtime_get_noresume(xe); 978 } 979 980 if (move_lacks_source) { 981 u32 flags = 0; 982 983 if (mem_type_is_vram(new_mem->mem_type)) 984 flags |= XE_MIGRATE_CLEAR_FLAG_FULL; 985 else if (handle_system_ccs) 986 flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA; 987 988 fence = xe_migrate_clear(migrate, bo, new_mem, flags); 989 } else { 990 fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem, 991 handle_system_ccs); 992 } 993 if (IS_ERR(fence)) { 994 ret = PTR_ERR(fence); 995 xe_pm_runtime_put(xe); 996 goto out; 997 } 998 if (!move_lacks_source) { 999 ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true, 1000 new_mem); 1001 if (ret) { 1002 dma_fence_wait(fence, false); 1003 ttm_bo_move_null(ttm_bo, new_mem); 1004 ret = 0; 1005 } 1006 } else { 1007 /* 1008 * ttm_bo_move_accel_cleanup() may blow up if 1009 * bo->resource == NULL, so just attach the 1010 * fence and set the new resource. 1011 */ 1012 dma_resv_add_fence(ttm_bo->base.resv, fence, 1013 DMA_RESV_USAGE_KERNEL); 1014 ttm_bo_move_null(ttm_bo, new_mem); 1015 } 1016 1017 dma_fence_put(fence); 1018 xe_pm_runtime_put(xe); 1019 1020 /* 1021 * CCS meta data is migrated from TT -> SMEM. So, let us detach the 1022 * BBs from BO as it is no longer needed. 
1023 */ 1024 if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT && 1025 new_mem->mem_type == XE_PL_SYSTEM) 1026 xe_sriov_vf_ccs_detach_bo(bo); 1027 1028 if (IS_VF_CCS_READY(xe) && 1029 ((move_lacks_source && new_mem->mem_type == XE_PL_TT) || 1030 (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) && 1031 handle_system_ccs) 1032 ret = xe_sriov_vf_ccs_attach_bo(bo); 1033 1034 out: 1035 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && 1036 ttm_bo->ttm) { 1037 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, 1038 DMA_RESV_USAGE_KERNEL, 1039 false, 1040 MAX_SCHEDULE_TIMEOUT); 1041 if (timeout < 0) 1042 ret = timeout; 1043 1044 if (IS_VF_CCS_READY(xe)) 1045 xe_sriov_vf_ccs_detach_bo(bo); 1046 1047 xe_tt_unmap_sg(xe, ttm_bo->ttm); 1048 } 1049 1050 return ret; 1051 } 1052 1053 static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx, 1054 struct ttm_buffer_object *bo, 1055 unsigned long *scanned) 1056 { 1057 struct xe_device *xe = ttm_to_xe_device(bo->bdev); 1058 long lret; 1059 1060 /* Fake move to system, without copying data. */ 1061 if (bo->resource->mem_type != XE_PL_SYSTEM) { 1062 struct ttm_resource *new_resource; 1063 1064 lret = ttm_bo_wait_ctx(bo, ctx); 1065 if (lret) 1066 return lret; 1067 1068 lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx); 1069 if (lret) 1070 return lret; 1071 1072 xe_tt_unmap_sg(xe, bo->ttm); 1073 ttm_bo_move_null(bo, new_resource); 1074 } 1075 1076 *scanned += bo->ttm->num_pages; 1077 lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) 1078 {.purge = true, 1079 .writeback = false, 1080 .allow_move = false}); 1081 1082 if (lret > 0) 1083 xe_ttm_tt_account_subtract(xe, bo->ttm); 1084 1085 return lret; 1086 } 1087 1088 static bool 1089 xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) 1090 { 1091 struct drm_gpuvm_bo *vm_bo; 1092 1093 if (!ttm_bo_eviction_valuable(bo, place)) 1094 return false; 1095 1096 if (!xe_bo_is_xe_bo(bo)) 1097 return true; 1098 1099 drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) { 1100 if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm))) 1101 return false; 1102 } 1103 1104 return true; 1105 } 1106 1107 /** 1108 * xe_bo_shrink() - Try to shrink an xe bo. 1109 * @ctx: The struct ttm_operation_ctx used for shrinking. 1110 * @bo: The TTM buffer object whose pages to shrink. 1111 * @flags: Flags governing the shrink behaviour. 1112 * @scanned: Pointer to a counter of the number of pages 1113 * attempted to shrink. 1114 * 1115 * Try to shrink- or purge a bo, and if it succeeds, unmap dma. 1116 * Note that we need to be able to handle also non xe bos 1117 * (ghost bos), but only if the struct ttm_tt is embedded in 1118 * a struct xe_ttm_tt. When the function attempts to shrink 1119 * the pages of a buffer object, The value pointed to by @scanned 1120 * is updated. 1121 * 1122 * Return: The number of pages shrunken or purged, or negative error 1123 * code on failure. 
1124 */ 1125 long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, 1126 const struct xe_bo_shrink_flags flags, 1127 unsigned long *scanned) 1128 { 1129 struct ttm_tt *tt = bo->ttm; 1130 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 1131 struct ttm_place place = {.mem_type = bo->resource->mem_type}; 1132 struct xe_bo *xe_bo = ttm_to_xe_bo(bo); 1133 struct xe_device *xe = ttm_to_xe_device(bo->bdev); 1134 bool needs_rpm; 1135 long lret = 0L; 1136 1137 if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) || 1138 (flags.purge && !xe_tt->purgeable)) 1139 return -EBUSY; 1140 1141 if (!xe_bo_eviction_valuable(bo, &place)) 1142 return -EBUSY; 1143 1144 if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo)) 1145 return xe_bo_shrink_purge(ctx, bo, scanned); 1146 1147 if (xe_tt->purgeable) { 1148 if (bo->resource->mem_type != XE_PL_SYSTEM) 1149 lret = xe_bo_move_notify(xe_bo, ctx); 1150 if (!lret) 1151 lret = xe_bo_shrink_purge(ctx, bo, scanned); 1152 goto out_unref; 1153 } 1154 1155 /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */ 1156 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM && 1157 xe_bo_needs_ccs_pages(xe_bo)); 1158 if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) 1159 goto out_unref; 1160 1161 *scanned += tt->num_pages; 1162 lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags) 1163 {.purge = false, 1164 .writeback = flags.writeback, 1165 .allow_move = true}); 1166 if (needs_rpm) 1167 xe_pm_runtime_put(xe); 1168 1169 if (lret > 0) 1170 xe_ttm_tt_account_subtract(xe, tt); 1171 1172 out_unref: 1173 xe_bo_put(xe_bo); 1174 1175 return lret; 1176 } 1177 1178 /** 1179 * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed 1180 * up in system memory. 1181 * @bo: The buffer object to prepare. 1182 * 1183 * On successful completion, the object backup pages are allocated. Expectation 1184 * is that this is called from the PM notifier, prior to suspend/hibernation. 1185 * 1186 * Return: 0 on success. Negative error code on failure. 1187 */ 1188 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo) 1189 { 1190 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); 1191 struct xe_validation_ctx ctx; 1192 struct drm_exec exec; 1193 struct xe_bo *backup; 1194 int ret = 0; 1195 1196 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { 1197 ret = drm_exec_lock_obj(&exec, &bo->ttm.base); 1198 drm_exec_retry_on_contention(&exec); 1199 xe_assert(xe, !ret); 1200 xe_assert(xe, !bo->backup_obj); 1201 1202 /* 1203 * Since this is called from the PM notifier we might have raced with 1204 * someone unpinning this after we dropped the pinned list lock and 1205 * grabbing the above bo lock. 
1206 */ 1207 if (!xe_bo_is_pinned(bo)) 1208 break; 1209 1210 if (!xe_bo_is_vram(bo)) 1211 break; 1212 1213 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) 1214 break; 1215 1216 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), 1217 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, 1218 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | 1219 XE_BO_FLAG_PINNED, &exec); 1220 if (IS_ERR(backup)) { 1221 drm_exec_retry_on_contention(&exec); 1222 ret = PTR_ERR(backup); 1223 xe_validation_retry_on_oom(&ctx, &ret); 1224 break; 1225 } 1226 1227 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ 1228 ttm_bo_pin(&backup->ttm); 1229 bo->backup_obj = backup; 1230 } 1231 1232 return ret; 1233 } 1234 1235 /** 1236 * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation. 1237 * @bo: The buffer object to undo the prepare for. 1238 * 1239 * Always returns 0. The backup object is removed, if still present. Expectation 1240 * it that this called from the PM notifier when undoing the prepare step. 1241 * 1242 * Return: Always returns 0. 1243 */ 1244 int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo) 1245 { 1246 xe_bo_lock(bo, false); 1247 if (bo->backup_obj) { 1248 ttm_bo_unpin(&bo->backup_obj->ttm); 1249 xe_bo_put(bo->backup_obj); 1250 bo->backup_obj = NULL; 1251 } 1252 xe_bo_unlock(bo); 1253 1254 return 0; 1255 } 1256 1257 static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup) 1258 { 1259 struct xe_device *xe = xe_bo_device(bo); 1260 bool unmap = false; 1261 int ret = 0; 1262 1263 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { 1264 struct xe_migrate *migrate; 1265 struct dma_fence *fence; 1266 1267 if (bo->tile) 1268 migrate = bo->tile->migrate; 1269 else 1270 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); 1271 1272 xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv); 1273 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); 1274 if (ret) 1275 goto out_backup; 1276 1277 fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource, 1278 backup->ttm.resource, false); 1279 if (IS_ERR(fence)) { 1280 ret = PTR_ERR(fence); 1281 goto out_backup; 1282 } 1283 1284 dma_resv_add_fence(bo->ttm.base.resv, fence, 1285 DMA_RESV_USAGE_KERNEL); 1286 dma_fence_put(fence); 1287 } else { 1288 ret = xe_bo_vmap(backup); 1289 if (ret) 1290 goto out_backup; 1291 1292 if (iosys_map_is_null(&bo->vmap)) { 1293 ret = xe_bo_vmap(bo); 1294 if (ret) 1295 goto out_vunmap; 1296 unmap = true; 1297 } 1298 1299 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0, 1300 xe_bo_size(bo)); 1301 } 1302 1303 if (!bo->backup_obj) 1304 bo->backup_obj = backup; 1305 out_vunmap: 1306 xe_bo_vunmap(backup); 1307 out_backup: 1308 if (unmap) 1309 xe_bo_vunmap(bo); 1310 1311 return ret; 1312 } 1313 1314 /** 1315 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory 1316 * @bo: The buffer object to move. 1317 * 1318 * On successful completion, the object memory will be moved to system memory. 1319 * 1320 * This is needed to for special handling of pinned VRAM object during 1321 * suspend-resume. 1322 * 1323 * Return: 0 on success. Negative error code on failure. 
1324 */ 1325 int xe_bo_evict_pinned(struct xe_bo *bo) 1326 { 1327 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); 1328 struct xe_validation_ctx ctx; 1329 struct drm_exec exec; 1330 struct xe_bo *backup = bo->backup_obj; 1331 bool backup_created = false; 1332 int ret = 0; 1333 1334 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) { 1335 ret = drm_exec_lock_obj(&exec, &bo->ttm.base); 1336 drm_exec_retry_on_contention(&exec); 1337 xe_assert(xe, !ret); 1338 1339 if (WARN_ON(!bo->ttm.resource)) { 1340 ret = -EINVAL; 1341 break; 1342 } 1343 1344 if (WARN_ON(!xe_bo_is_pinned(bo))) { 1345 ret = -EINVAL; 1346 break; 1347 } 1348 1349 if (!xe_bo_is_vram(bo)) 1350 break; 1351 1352 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) 1353 break; 1354 1355 if (!backup) { 1356 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, 1357 xe_bo_size(bo), 1358 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, 1359 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | 1360 XE_BO_FLAG_PINNED, &exec); 1361 if (IS_ERR(backup)) { 1362 drm_exec_retry_on_contention(&exec); 1363 ret = PTR_ERR(backup); 1364 xe_validation_retry_on_oom(&ctx, &ret); 1365 break; 1366 } 1367 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */ 1368 backup_created = true; 1369 } 1370 1371 ret = xe_bo_evict_pinned_copy(bo, backup); 1372 } 1373 1374 if (ret && backup_created) 1375 xe_bo_put(backup); 1376 1377 return ret; 1378 } 1379 1380 /** 1381 * xe_bo_restore_pinned() - Restore a pinned VRAM object 1382 * @bo: The buffer object to move. 1383 * 1384 * On successful completion, the object memory will be moved back to VRAM. 1385 * 1386 * This is needed to for special handling of pinned VRAM object during 1387 * suspend-resume. 1388 * 1389 * Return: 0 on success. Negative error code on failure. 
1390 */ 1391 int xe_bo_restore_pinned(struct xe_bo *bo) 1392 { 1393 struct ttm_operation_ctx ctx = { 1394 .interruptible = false, 1395 .gfp_retry_mayfail = false, 1396 }; 1397 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); 1398 struct xe_bo *backup = bo->backup_obj; 1399 bool unmap = false; 1400 int ret; 1401 1402 if (!backup) 1403 return 0; 1404 1405 xe_bo_lock(bo, false); 1406 1407 if (!xe_bo_is_pinned(backup)) { 1408 ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx); 1409 if (ret) 1410 goto out_unlock_bo; 1411 } 1412 1413 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) { 1414 struct xe_migrate *migrate; 1415 struct dma_fence *fence; 1416 1417 if (bo->tile) 1418 migrate = bo->tile->migrate; 1419 else 1420 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type); 1421 1422 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1); 1423 if (ret) 1424 goto out_unlock_bo; 1425 1426 fence = xe_migrate_copy(migrate, backup, bo, 1427 backup->ttm.resource, bo->ttm.resource, 1428 false); 1429 if (IS_ERR(fence)) { 1430 ret = PTR_ERR(fence); 1431 goto out_unlock_bo; 1432 } 1433 1434 dma_resv_add_fence(bo->ttm.base.resv, fence, 1435 DMA_RESV_USAGE_KERNEL); 1436 dma_fence_put(fence); 1437 } else { 1438 ret = xe_bo_vmap(backup); 1439 if (ret) 1440 goto out_unlock_bo; 1441 1442 if (iosys_map_is_null(&bo->vmap)) { 1443 ret = xe_bo_vmap(bo); 1444 if (ret) 1445 goto out_backup; 1446 unmap = true; 1447 } 1448 1449 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, 1450 xe_bo_size(bo)); 1451 } 1452 1453 bo->backup_obj = NULL; 1454 1455 out_backup: 1456 xe_bo_vunmap(backup); 1457 if (!bo->backup_obj) { 1458 if (xe_bo_is_pinned(backup)) 1459 ttm_bo_unpin(&backup->ttm); 1460 xe_bo_put(backup); 1461 } 1462 out_unlock_bo: 1463 if (unmap) 1464 xe_bo_vunmap(bo); 1465 xe_bo_unlock(bo); 1466 return ret; 1467 } 1468 1469 int xe_bo_dma_unmap_pinned(struct xe_bo *bo) 1470 { 1471 struct ttm_buffer_object *ttm_bo = &bo->ttm; 1472 struct ttm_tt *tt = ttm_bo->ttm; 1473 1474 if (tt) { 1475 struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm); 1476 1477 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { 1478 dma_buf_unmap_attachment(ttm_bo->base.import_attach, 1479 ttm_bo->sg, 1480 DMA_BIDIRECTIONAL); 1481 ttm_bo->sg = NULL; 1482 xe_tt->sg = NULL; 1483 } else if (xe_tt->sg) { 1484 dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev, 1485 xe_tt->sg, 1486 DMA_BIDIRECTIONAL, 0); 1487 sg_free_table(xe_tt->sg); 1488 xe_tt->sg = NULL; 1489 } 1490 } 1491 1492 return 0; 1493 } 1494 1495 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo, 1496 unsigned long page_offset) 1497 { 1498 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 1499 struct xe_res_cursor cursor; 1500 struct xe_vram_region *vram; 1501 1502 if (ttm_bo->resource->mem_type == XE_PL_STOLEN) 1503 return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT; 1504 1505 vram = res_to_mem_region(ttm_bo->resource); 1506 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor); 1507 return (vram->io_start + cursor.start) >> PAGE_SHIFT; 1508 } 1509 1510 static void __xe_bo_vunmap(struct xe_bo *bo); 1511 1512 /* 1513 * TODO: Move this function to TTM so we don't rely on how TTM does its 1514 * locking, thereby abusing TTM internals. 
1515 */ 1516 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo) 1517 { 1518 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 1519 bool locked; 1520 1521 xe_assert(xe, !kref_read(&ttm_bo->kref)); 1522 1523 /* 1524 * We can typically only race with TTM trylocking under the 1525 * lru_lock, which will immediately be unlocked again since 1526 * the ttm_bo refcount is zero at this point. So trylocking *should* 1527 * always succeed here, as long as we hold the lru lock. 1528 */ 1529 spin_lock(&ttm_bo->bdev->lru_lock); 1530 locked = dma_resv_trylock(ttm_bo->base.resv); 1531 spin_unlock(&ttm_bo->bdev->lru_lock); 1532 xe_assert(xe, locked); 1533 1534 return locked; 1535 } 1536 1537 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo) 1538 { 1539 struct dma_resv_iter cursor; 1540 struct dma_fence *fence; 1541 struct dma_fence *replacement = NULL; 1542 struct xe_bo *bo; 1543 1544 if (!xe_bo_is_xe_bo(ttm_bo)) 1545 return; 1546 1547 bo = ttm_to_xe_bo(ttm_bo); 1548 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount))); 1549 1550 /* 1551 * Corner case where TTM fails to allocate memory and this BOs resv 1552 * still points the VMs resv 1553 */ 1554 if (ttm_bo->base.resv != &ttm_bo->base._resv) 1555 return; 1556 1557 if (!xe_ttm_bo_lock_in_destructor(ttm_bo)) 1558 return; 1559 1560 /* 1561 * Scrub the preempt fences if any. The unbind fence is already 1562 * attached to the resv. 1563 * TODO: Don't do this for external bos once we scrub them after 1564 * unbind. 1565 */ 1566 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, 1567 DMA_RESV_USAGE_BOOKKEEP, fence) { 1568 if (xe_fence_is_xe_preempt(fence) && 1569 !dma_fence_is_signaled(fence)) { 1570 if (!replacement) 1571 replacement = dma_fence_get_stub(); 1572 1573 dma_resv_replace_fences(ttm_bo->base.resv, 1574 fence->context, 1575 replacement, 1576 DMA_RESV_USAGE_BOOKKEEP); 1577 } 1578 } 1579 dma_fence_put(replacement); 1580 1581 dma_resv_unlock(ttm_bo->base.resv); 1582 } 1583 1584 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) 1585 { 1586 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 1587 1588 if (!xe_bo_is_xe_bo(ttm_bo)) 1589 return; 1590 1591 if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev))) 1592 xe_sriov_vf_ccs_detach_bo(bo); 1593 1594 /* 1595 * Object is idle and about to be destroyed. Release the 1596 * dma-buf attachment. 
1597 */ 1598 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { 1599 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, 1600 struct xe_ttm_tt, ttm); 1601 1602 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, 1603 DMA_BIDIRECTIONAL); 1604 ttm_bo->sg = NULL; 1605 xe_tt->sg = NULL; 1606 } 1607 } 1608 1609 static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx) 1610 { 1611 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 1612 1613 if (ttm_bo->ttm) { 1614 struct ttm_placement place = {}; 1615 int ret = ttm_bo_validate(ttm_bo, &place, ctx); 1616 1617 drm_WARN_ON(&xe->drm, ret); 1618 } 1619 } 1620 1621 static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo) 1622 { 1623 struct ttm_operation_ctx ctx = { 1624 .interruptible = false, 1625 .gfp_retry_mayfail = false, 1626 }; 1627 1628 if (ttm_bo->ttm) { 1629 struct xe_ttm_tt *xe_tt = 1630 container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm); 1631 1632 if (xe_tt->purgeable) 1633 xe_ttm_bo_purge(ttm_bo, &ctx); 1634 } 1635 } 1636 1637 static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, 1638 unsigned long offset, void *buf, int len, 1639 int write) 1640 { 1641 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 1642 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 1643 struct iosys_map vmap; 1644 struct xe_res_cursor cursor; 1645 struct xe_vram_region *vram; 1646 int bytes_left = len; 1647 int err = 0; 1648 1649 xe_bo_assert_held(bo); 1650 xe_device_assert_mem_access(xe); 1651 1652 if (!mem_type_is_vram(ttm_bo->resource->mem_type)) 1653 return -EIO; 1654 1655 if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) { 1656 struct xe_migrate *migrate = 1657 mem_type_to_migrate(xe, ttm_bo->resource->mem_type); 1658 1659 err = xe_migrate_access_memory(migrate, bo, offset, buf, len, 1660 write); 1661 goto out; 1662 } 1663 1664 vram = res_to_mem_region(ttm_bo->resource); 1665 xe_res_first(ttm_bo->resource, offset & PAGE_MASK, 1666 xe_bo_size(bo) - (offset & PAGE_MASK), &cursor); 1667 1668 do { 1669 unsigned long page_offset = (offset & ~PAGE_MASK); 1670 int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left); 1671 1672 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping + 1673 cursor.start); 1674 if (write) 1675 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count); 1676 else 1677 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count); 1678 1679 buf += byte_count; 1680 offset += byte_count; 1681 bytes_left -= byte_count; 1682 if (bytes_left) 1683 xe_res_next(&cursor, PAGE_SIZE); 1684 } while (bytes_left); 1685 1686 out: 1687 return err ?: len; 1688 } 1689 1690 const struct ttm_device_funcs xe_ttm_funcs = { 1691 .ttm_tt_create = xe_ttm_tt_create, 1692 .ttm_tt_populate = xe_ttm_tt_populate, 1693 .ttm_tt_unpopulate = xe_ttm_tt_unpopulate, 1694 .ttm_tt_destroy = xe_ttm_tt_destroy, 1695 .evict_flags = xe_evict_flags, 1696 .move = xe_bo_move, 1697 .io_mem_reserve = xe_ttm_io_mem_reserve, 1698 .io_mem_pfn = xe_ttm_io_mem_pfn, 1699 .access_memory = xe_ttm_access_memory, 1700 .release_notify = xe_ttm_bo_release_notify, 1701 .eviction_valuable = xe_bo_eviction_valuable, 1702 .delete_mem_notify = xe_ttm_bo_delete_mem_notify, 1703 .swap_notify = xe_ttm_bo_swap_notify, 1704 }; 1705 1706 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) 1707 { 1708 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 1709 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 1710 struct xe_tile *tile; 1711 u8 id; 1712 1713 if (bo->ttm.base.import_attach) 1714 
drm_prime_gem_destroy(&bo->ttm.base, NULL); 1715 drm_gem_object_release(&bo->ttm.base); 1716 1717 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); 1718 1719 for_each_tile(tile, xe, id) 1720 if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) 1721 xe_ggtt_remove_bo(tile->mem.ggtt, bo); 1722 1723 #ifdef CONFIG_PROC_FS 1724 if (bo->client) 1725 xe_drm_client_remove_bo(bo); 1726 #endif 1727 1728 if (bo->vm && xe_bo_is_user(bo)) 1729 xe_vm_put(bo->vm); 1730 1731 if (bo->parent_obj) 1732 xe_bo_put(bo->parent_obj); 1733 1734 mutex_lock(&xe->mem_access.vram_userfault.lock); 1735 if (!list_empty(&bo->vram_userfault_link)) 1736 list_del(&bo->vram_userfault_link); 1737 mutex_unlock(&xe->mem_access.vram_userfault.lock); 1738 1739 kfree(bo); 1740 } 1741 1742 static void xe_gem_object_free(struct drm_gem_object *obj) 1743 { 1744 /* Our BO reference counting scheme works as follows: 1745 * 1746 * The gem object kref is typically used throughout the driver, 1747 * and the gem object holds a ttm_buffer_object refcount, so 1748 * that when the last gem object reference is put, which is when 1749 * we end up in this function, we put also that ttm_buffer_object 1750 * refcount. Anything using gem interfaces is then no longer 1751 * allowed to access the object in a way that requires a gem 1752 * refcount, including locking the object. 1753 * 1754 * driver ttm callbacks is allowed to use the ttm_buffer_object 1755 * refcount directly if needed. 1756 */ 1757 __xe_bo_vunmap(gem_to_xe_bo(obj)); 1758 ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base)); 1759 } 1760 1761 static void xe_gem_object_close(struct drm_gem_object *obj, 1762 struct drm_file *file_priv) 1763 { 1764 struct xe_bo *bo = gem_to_xe_bo(obj); 1765 1766 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) { 1767 xe_assert(xe_bo_device(bo), xe_bo_is_user(bo)); 1768 1769 xe_bo_lock(bo, false); 1770 ttm_bo_set_bulk_move(&bo->ttm, NULL); 1771 xe_bo_unlock(bo); 1772 } 1773 } 1774 1775 static bool should_migrate_to_smem(struct xe_bo *bo) 1776 { 1777 /* 1778 * NOTE: The following atomic checks are platform-specific. For example, 1779 * if a device supports CXL atomics, these may not be necessary or 1780 * may behave differently. 1781 */ 1782 1783 return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL || 1784 bo->attr.atomic_access == DRM_XE_ATOMIC_CPU; 1785 } 1786 1787 static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx) 1788 { 1789 long lerr; 1790 1791 if (ctx->no_wait_gpu) 1792 return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ? 1793 0 : -EBUSY; 1794 1795 lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, 1796 ctx->interruptible, MAX_SCHEDULE_TIMEOUT); 1797 if (lerr < 0) 1798 return lerr; 1799 if (lerr == 0) 1800 return -EBUSY; 1801 1802 return 0; 1803 } 1804 1805 /* Populate the bo if swapped out, or migrate if the access mode requires that. 
*/ 1806 static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx, 1807 struct drm_exec *exec) 1808 { 1809 struct ttm_buffer_object *tbo = &bo->ttm; 1810 int err = 0; 1811 1812 if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) { 1813 err = xe_bo_wait_usage_kernel(bo, ctx); 1814 if (!err) 1815 err = ttm_bo_populate(&bo->ttm, ctx); 1816 } else if (should_migrate_to_smem(bo)) { 1817 xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM); 1818 err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec); 1819 } 1820 1821 return err; 1822 } 1823 1824 /* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */ 1825 static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo) 1826 { 1827 vm_fault_t ret; 1828 1829 trace_xe_bo_cpu_fault(bo); 1830 1831 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, 1832 TTM_BO_VM_NUM_PREFAULT); 1833 /* 1834 * When TTM is actually called to insert PTEs, ensure no blocking conditions 1835 * remain, in which case TTM may drop locks and return VM_FAULT_RETRY. 1836 */ 1837 xe_assert(xe, ret != VM_FAULT_RETRY); 1838 1839 if (ret == VM_FAULT_NOPAGE && 1840 mem_type_is_vram(bo->ttm.resource->mem_type)) { 1841 mutex_lock(&xe->mem_access.vram_userfault.lock); 1842 if (list_empty(&bo->vram_userfault_link)) 1843 list_add(&bo->vram_userfault_link, 1844 &xe->mem_access.vram_userfault.list); 1845 mutex_unlock(&xe->mem_access.vram_userfault.lock); 1846 } 1847 1848 return ret; 1849 } 1850 1851 static vm_fault_t xe_err_to_fault_t(int err) 1852 { 1853 switch (err) { 1854 case 0: 1855 case -EINTR: 1856 case -ERESTARTSYS: 1857 case -EAGAIN: 1858 return VM_FAULT_NOPAGE; 1859 case -ENOMEM: 1860 case -ENOSPC: 1861 return VM_FAULT_OOM; 1862 default: 1863 break; 1864 } 1865 return VM_FAULT_SIGBUS; 1866 } 1867 1868 static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo) 1869 { 1870 dma_resv_assert_held(tbo->base.resv); 1871 1872 return tbo->ttm && 1873 (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) == 1874 TTM_TT_FLAG_EXTERNAL; 1875 } 1876 1877 static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe, 1878 struct xe_bo *bo, bool needs_rpm) 1879 { 1880 struct ttm_buffer_object *tbo = &bo->ttm; 1881 vm_fault_t ret = VM_FAULT_RETRY; 1882 struct xe_validation_ctx ctx; 1883 struct ttm_operation_ctx tctx = { 1884 .interruptible = true, 1885 .no_wait_gpu = true, 1886 .gfp_retry_mayfail = true, 1887 1888 }; 1889 int err; 1890 1891 if (needs_rpm && !xe_pm_runtime_get_if_active(xe)) 1892 return VM_FAULT_RETRY; 1893 1894 err = xe_validation_ctx_init(&ctx, &xe->val, NULL, 1895 (struct xe_val_flags) { 1896 .interruptible = true, 1897 .no_block = true 1898 }); 1899 if (err) 1900 goto out_pm; 1901 1902 if (!dma_resv_trylock(tbo->base.resv)) 1903 goto out_validation; 1904 1905 if (xe_ttm_bo_is_imported(tbo)) { 1906 ret = VM_FAULT_SIGBUS; 1907 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); 1908 goto out_unlock; 1909 } 1910 1911 err = xe_bo_fault_migrate(bo, &tctx, NULL); 1912 if (err) { 1913 /* Return VM_FAULT_RETRY on these errors. 
*/ 1914 if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY) 1915 ret = xe_err_to_fault_t(err); 1916 goto out_unlock; 1917 } 1918 1919 if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) 1920 ret = __xe_bo_cpu_fault(vmf, xe, bo); 1921 1922 out_unlock: 1923 dma_resv_unlock(tbo->base.resv); 1924 out_validation: 1925 xe_validation_ctx_fini(&ctx); 1926 out_pm: 1927 if (needs_rpm) 1928 xe_pm_runtime_put(xe); 1929 1930 return ret; 1931 } 1932 1933 static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf) 1934 { 1935 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data; 1936 struct drm_device *ddev = tbo->base.dev; 1937 struct xe_device *xe = to_xe_device(ddev); 1938 struct xe_bo *bo = ttm_to_xe_bo(tbo); 1939 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK; 1940 bool retry_after_wait = false; 1941 struct xe_validation_ctx ctx; 1942 struct drm_exec exec; 1943 vm_fault_t ret; 1944 int err = 0; 1945 int idx; 1946 1947 if (!drm_dev_enter(&xe->drm, &idx)) 1948 return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); 1949 1950 ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm); 1951 if (ret != VM_FAULT_RETRY) 1952 goto out; 1953 1954 if (fault_flag_allow_retry_first(vmf->flags)) { 1955 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 1956 goto out; 1957 retry_after_wait = true; 1958 xe_bo_get(bo); 1959 mmap_read_unlock(vmf->vma->vm_mm); 1960 } else { 1961 ret = VM_FAULT_NOPAGE; 1962 } 1963 1964 /* 1965 * The fastpath failed and we were not required to return and retry immediately. 1966 * We're now running in one of two modes: 1967 * 1968 * 1) retry_after_wait == true: The mmap_read_lock() is dropped, and we're trying 1969 * to resolve blocking waits. But we can't resolve the fault since the 1970 * mmap_read_lock() is dropped. After retrying the fault, the aim is that the fastpath 1971 * should succeed. But it may fail since we drop the bo lock. 1972 * 1973 * 2) retry_after_wait == false: The fastpath failed, typically even after 1974 * a retry. Do whatever's necessary to resolve the fault. 1975 * 1976 * This construct is recommended to avoid excessive waits under the mmap_lock. 1977 */ 1978 1979 if (needs_rpm) 1980 xe_pm_runtime_get(xe); 1981 1982 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, 1983 err) { 1984 struct ttm_operation_ctx tctx = { 1985 .interruptible = true, 1986 .no_wait_gpu = false, 1987 .gfp_retry_mayfail = retry_after_wait, 1988 }; 1989 1990 err = drm_exec_lock_obj(&exec, &tbo->base); 1991 drm_exec_retry_on_contention(&exec); 1992 if (err) 1993 break; 1994 1995 if (xe_ttm_bo_is_imported(tbo)) { 1996 err = -EFAULT; 1997 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n"); 1998 break; 1999 } 2000 2001 err = xe_bo_fault_migrate(bo, &tctx, &exec); 2002 if (err) { 2003 drm_exec_retry_on_contention(&exec); 2004 xe_validation_retry_on_oom(&ctx, &err); 2005 break; 2006 } 2007 2008 err = xe_bo_wait_usage_kernel(bo, &tctx); 2009 if (err) 2010 break; 2011 2012 if (!retry_after_wait) 2013 ret = __xe_bo_cpu_fault(vmf, xe, bo); 2014 } 2015 /* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. 
*/ 2016 if (err && !retry_after_wait) 2017 ret = xe_err_to_fault_t(err); 2018 2019 if (needs_rpm) 2020 xe_pm_runtime_put(xe); 2021 2022 if (retry_after_wait) 2023 xe_bo_put(bo); 2024 out: 2025 drm_dev_exit(idx); 2026 2027 return ret; 2028 } 2029 2030 static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, 2031 void *buf, int len, int write) 2032 { 2033 struct ttm_buffer_object *ttm_bo = vma->vm_private_data; 2034 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 2035 struct xe_device *xe = xe_bo_device(bo); 2036 int ret; 2037 2038 xe_pm_runtime_get(xe); 2039 ret = ttm_bo_vm_access(vma, addr, buf, len, write); 2040 xe_pm_runtime_put(xe); 2041 2042 return ret; 2043 } 2044 2045 /** 2046 * xe_bo_read() - Read from an xe_bo 2047 * @bo: The buffer object to read from. 2048 * @offset: The byte offset to start reading from. 2049 * @dst: Location to store the read. 2050 * @size: Size in bytes for the read. 2051 * 2052 * Read @size bytes from the @bo, starting from @offset, storing into @dst. 2053 * 2054 * Return: Zero on success, or negative error. 2055 */ 2056 int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size) 2057 { 2058 int ret; 2059 2060 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0); 2061 if (ret >= 0 && ret != size) 2062 ret = -EIO; 2063 else if (ret == size) 2064 ret = 0; 2065 2066 return ret; 2067 } 2068 2069 static const struct vm_operations_struct xe_gem_vm_ops = { 2070 .fault = xe_bo_cpu_fault, 2071 .open = ttm_bo_vm_open, 2072 .close = ttm_bo_vm_close, 2073 .access = xe_bo_vm_access, 2074 }; 2075 2076 static const struct drm_gem_object_funcs xe_gem_object_funcs = { 2077 .free = xe_gem_object_free, 2078 .close = xe_gem_object_close, 2079 .mmap = drm_gem_ttm_mmap, 2080 .export = xe_gem_prime_export, 2081 .vm_ops = &xe_gem_vm_ops, 2082 }; 2083 2084 /** 2085 * xe_bo_alloc - Allocate storage for a struct xe_bo 2086 * 2087 * This function is intended to allocate storage to be used for input 2088 * to __xe_bo_create_locked(), in the case a pointer to the bo to be 2089 * created is needed before the call to __xe_bo_create_locked(). 2090 * If __xe_bo_create_locked ends up never to be called, then the 2091 * storage allocated with this function needs to be freed using 2092 * xe_bo_free(). 2093 * 2094 * Return: A pointer to an uninitialized struct xe_bo on success, 2095 * ERR_PTR(-ENOMEM) on error. 2096 */ 2097 struct xe_bo *xe_bo_alloc(void) 2098 { 2099 struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL); 2100 2101 if (!bo) 2102 return ERR_PTR(-ENOMEM); 2103 2104 return bo; 2105 } 2106 2107 /** 2108 * xe_bo_free - Free storage allocated using xe_bo_alloc() 2109 * @bo: The buffer object storage. 2110 * 2111 * Refer to xe_bo_alloc() documentation for valid use-cases. 2112 */ 2113 void xe_bo_free(struct xe_bo *bo) 2114 { 2115 kfree(bo); 2116 } 2117 2118 /** 2119 * xe_bo_init_locked() - Initialize or create an xe_bo. 2120 * @xe: The xe device. 2121 * @bo: An already allocated buffer object or NULL 2122 * if the function should allocate a new one. 2123 * @tile: The tile to select for migration of this bo, and the tile used for 2124 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos. 2125 * @resv: Pointer to a locked shared reservation object to use for this bo, 2126 * or NULL for the xe_bo to use its own. 2127 * @bulk: The bulk move to use for LRU bumping, or NULL for external bos. 2128 * @size: The storage size to use for the bo. 2129 * @cpu_caching: The cpu caching used for system memory backing store. 2130 * @type: The TTM buffer object type. 
2131 * @flags: XE_BO_FLAG_ flags. 2132 * @exec: The drm_exec transaction to use for exhaustive eviction. 2133 * 2134 * Initialize or create an xe buffer object. On failure, any allocated buffer 2135 * object passed in @bo will have been unreferenced. 2136 * 2137 * Return: The buffer object on success. Negative error pointer on failure. 2138 */ 2139 struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo, 2140 struct xe_tile *tile, struct dma_resv *resv, 2141 struct ttm_lru_bulk_move *bulk, size_t size, 2142 u16 cpu_caching, enum ttm_bo_type type, 2143 u32 flags, struct drm_exec *exec) 2144 { 2145 struct ttm_operation_ctx ctx = { 2146 .interruptible = true, 2147 .no_wait_gpu = false, 2148 .gfp_retry_mayfail = true, 2149 }; 2150 struct ttm_placement *placement; 2151 uint32_t alignment; 2152 size_t aligned_size; 2153 int err; 2154 2155 /* Only kernel objects should set GT */ 2156 xe_assert(xe, !tile || type == ttm_bo_type_kernel); 2157 2158 if (XE_WARN_ON(!size)) { 2159 xe_bo_free(bo); 2160 return ERR_PTR(-EINVAL); 2161 } 2162 2163 /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */ 2164 if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT)) 2165 return ERR_PTR(-EINVAL); 2166 2167 if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && 2168 !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && 2169 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || 2170 (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) { 2171 size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K; 2172 2173 aligned_size = ALIGN(size, align); 2174 if (type != ttm_bo_type_device) 2175 size = ALIGN(size, align); 2176 flags |= XE_BO_FLAG_INTERNAL_64K; 2177 alignment = align >> PAGE_SHIFT; 2178 } else { 2179 aligned_size = ALIGN(size, SZ_4K); 2180 flags &= ~XE_BO_FLAG_INTERNAL_64K; 2181 alignment = SZ_4K >> PAGE_SHIFT; 2182 } 2183 2184 if (type == ttm_bo_type_device && aligned_size != size) 2185 return ERR_PTR(-EINVAL); 2186 2187 if (!bo) { 2188 bo = xe_bo_alloc(); 2189 if (IS_ERR(bo)) 2190 return bo; 2191 } 2192 2193 bo->ccs_cleared = false; 2194 bo->tile = tile; 2195 bo->flags = flags; 2196 bo->cpu_caching = cpu_caching; 2197 bo->ttm.base.funcs = &xe_gem_object_funcs; 2198 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; 2199 INIT_LIST_HEAD(&bo->pinned_link); 2200 #ifdef CONFIG_PROC_FS 2201 INIT_LIST_HEAD(&bo->client_link); 2202 #endif 2203 INIT_LIST_HEAD(&bo->vram_userfault_link); 2204 2205 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); 2206 2207 if (resv) { 2208 ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT); 2209 ctx.resv = resv; 2210 } 2211 2212 xe_validation_assert_exec(xe, exec, &bo->ttm.base); 2213 if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) { 2214 err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type); 2215 if (WARN_ON(err)) { 2216 xe_ttm_bo_destroy(&bo->ttm); 2217 return ERR_PTR(err); 2218 } 2219 } 2220 2221 /* Defer populating type_sg bos */ 2222 placement = (type == ttm_bo_type_sg || 2223 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : 2224 &bo->placement; 2225 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, 2226 placement, alignment, 2227 &ctx, NULL, resv, xe_ttm_bo_destroy); 2228 if (err) 2229 return ERR_PTR(err); 2230 2231 /* 2232 * The VRAM pages underneath are potentially still being accessed by the 2233 * GPU, as per async GPU clearing and async evictions. However TTM makes 2234 * sure to add any corresponding move/clear fences into the objects 2235 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. 
2236 * 2237 * For KMD internal buffers we don't care about GPU clearing, however we 2238 * still need to handle async evictions, where the VRAM is still being 2239 * accessed by the GPU. Most internal callers are not expecting this, 2240 * since they are missing the required synchronisation before accessing 2241 * the memory. To keep things simple just sync wait any kernel fences 2242 * here, if the buffer is designated KMD internal. 2243 * 2244 * For normal userspace objects we should already have the required 2245 * pipelining or sync waiting elsewhere, since we already have to deal 2246 * with things like async GPU clearing. 2247 */ 2248 if (type == ttm_bo_type_kernel) { 2249 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv, 2250 DMA_RESV_USAGE_KERNEL, 2251 ctx.interruptible, 2252 MAX_SCHEDULE_TIMEOUT); 2253 2254 if (timeout < 0) { 2255 if (!resv) 2256 dma_resv_unlock(bo->ttm.base.resv); 2257 xe_bo_put(bo); 2258 return ERR_PTR(timeout); 2259 } 2260 } 2261 2262 bo->created = true; 2263 if (bulk) 2264 ttm_bo_set_bulk_move(&bo->ttm, bulk); 2265 else 2266 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); 2267 2268 return bo; 2269 } 2270 2271 static int __xe_bo_fixed_placement(struct xe_device *xe, 2272 struct xe_bo *bo, enum ttm_bo_type type, 2273 u32 flags, 2274 u64 start, u64 end, u64 size) 2275 { 2276 struct ttm_place *place = bo->placements; 2277 u32 vram_flag, vram_stolen_flags; 2278 2279 /* 2280 * to allow fixed placement in GGTT of a VF, post-migration fixups would have to 2281 * include selecting a new fixed offset and shifting the page ranges for it 2282 */ 2283 xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT)); 2284 2285 if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM)) 2286 return -EINVAL; 2287 2288 vram_flag = flags & XE_BO_FLAG_VRAM_MASK; 2289 vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag; 2290 2291 /* check if more than one VRAM/STOLEN flag is set */ 2292 if (hweight32(vram_stolen_flags) > 1) 2293 return -EINVAL; 2294 2295 place->flags = TTM_PL_FLAG_CONTIGUOUS; 2296 place->fpfn = start >> PAGE_SHIFT; 2297 place->lpfn = end >> PAGE_SHIFT; 2298 2299 if (flags & XE_BO_FLAG_STOLEN) 2300 place->mem_type = XE_PL_STOLEN; 2301 else 2302 place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type); 2303 2304 bo->placement = (struct ttm_placement) { 2305 .num_placement = 1, 2306 .placement = place, 2307 }; 2308 2309 return 0; 2310 } 2311 2312 static struct xe_bo * 2313 __xe_bo_create_locked(struct xe_device *xe, 2314 struct xe_tile *tile, struct xe_vm *vm, 2315 size_t size, u64 start, u64 end, 2316 u16 cpu_caching, enum ttm_bo_type type, u32 flags, 2317 u64 alignment, struct drm_exec *exec) 2318 { 2319 struct xe_bo *bo = NULL; 2320 int err; 2321 2322 if (vm) 2323 xe_vm_assert_held(vm); 2324 2325 if (start || end != ~0ULL) { 2326 bo = xe_bo_alloc(); 2327 if (IS_ERR(bo)) 2328 return bo; 2329 2330 flags |= XE_BO_FLAG_FIXED_PLACEMENT; 2331 err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size); 2332 if (err) { 2333 xe_bo_free(bo); 2334 return ERR_PTR(err); 2335 } 2336 } 2337 2338 bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL, 2339 vm && !xe_vm_in_fault_mode(vm) && 2340 flags & XE_BO_FLAG_USER ? 
			       &vm->lru_bulk_move : NULL, size,
2342			       cpu_caching, type, flags, exec);
2343	if (IS_ERR(bo))
2344		return bo;
2345
2346	bo->min_align = alignment;
2347
2348	/*
2349	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
2350	 * to ensure the shared resv doesn't disappear under the bo, the bo
2351	 * will keep a reference to the vm, and avoid circular references
2352	 * by having all the vm's bo references released at vm close
2353	 * time.
2354	 */
2355	if (vm && xe_bo_is_user(bo))
2356		xe_vm_get(vm);
2357	bo->vm = vm;
2358
2359	if (bo->flags & XE_BO_FLAG_GGTT) {
2360		struct xe_tile *t;
2361		u8 id;
2362
2363		if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
2364			if (!tile && flags & XE_BO_FLAG_STOLEN)
2365				tile = xe_device_get_root_tile(xe);
2366
2367			xe_assert(xe, tile);
2368		}
2369
2370		for_each_tile(t, xe, id) {
2371			if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
2372				continue;
2373
2374			if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
2375				err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
2376							   start + xe_bo_size(bo), U64_MAX,
2377							   exec);
2378			} else {
2379				err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
2380			}
2381			if (err)
2382				goto err_unlock_put_bo;
2383		}
2384	}
2385
2386	trace_xe_bo_create(bo);
2387	return bo;
2388
2389 err_unlock_put_bo:
2390	__xe_bo_unset_bulk_move(bo);
2391	xe_bo_unlock_vm_held(bo);
2392	xe_bo_put(bo);
2393	return ERR_PTR(err);
2394 }
2395
2396 /**
2397  * xe_bo_create_locked() - Create a BO
2398  * @xe: The xe device.
2399  * @tile: The tile to select for migration of this bo, and the tile used for
2400  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2401  * @vm: The local vm or NULL for external objects.
2402  * @size: The storage size to use for the bo.
2403  * @type: The TTM buffer object type.
2404  * @flags: XE_BO_FLAG_ flags.
2405  * @exec: The drm_exec transaction to use for exhaustive eviction.
2406  *
2407  * Create a locked xe BO with no range- nor alignment restrictions.
2408  *
2409  * Return: The buffer object on success. Negative error pointer on failure.
2410  */
2411 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
2412				   struct xe_vm *vm, size_t size,
2413				   enum ttm_bo_type type, u32 flags,
2414				   struct drm_exec *exec)
2415 {
2416	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
2417				     flags, 0, exec);
2418 }
2419
2420 static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
2421				       size_t size, u16 cpu_caching,
2422				       enum ttm_bo_type type, u32 flags,
2423				       u64 alignment, bool intr)
2424 {
2425	struct xe_validation_ctx ctx;
2426	struct drm_exec exec;
2427	struct xe_bo *bo;
2428	int ret = 0;
2429
2430	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2431			    ret) {
2432		bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
2433					   cpu_caching, type, flags, alignment, &exec);
2434		drm_exec_retry_on_contention(&exec);
2435		if (IS_ERR(bo)) {
2436			ret = PTR_ERR(bo);
2437			xe_validation_retry_on_oom(&ctx, &ret);
2438		} else {
2439			xe_bo_unlock(bo);
2440		}
2441	}
2442
2443	return ret ? ERR_PTR(ret) : bo;
2444 }
2445
2446 /**
2447  * xe_bo_create_user() - Create a user BO
2448  * @xe: The xe device.
2449  * @vm: The local vm or NULL for external objects.
2450  * @size: The storage size to use for the bo.
2451  * @cpu_caching: The caching mode to be used for system backing store.
2452  * @flags: XE_BO_FLAG_ flags.
2453  * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
2454  * if such a transaction should be initiated by the call.
2455  *
2456  * Create a bo on behalf of user-space.
2457  *
2458  * Return: The buffer object on success. Negative error pointer on failure.
2459  */
2460 struct xe_bo *xe_bo_create_user(struct xe_device *xe,
2461				 struct xe_vm *vm, size_t size,
2462				 u16 cpu_caching,
2463				 u32 flags, struct drm_exec *exec)
2464 {
2465	struct xe_bo *bo;
2466
2467	flags |= XE_BO_FLAG_USER;
2468
2469	if (vm || exec) {
2470		xe_assert(xe, exec);
2471		bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
2472					   cpu_caching, ttm_bo_type_device,
2473					   flags, 0, exec);
2474		if (!IS_ERR(bo))
2475			xe_bo_unlock_vm_held(bo);
2476	} else {
2477		bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
2478				       ttm_bo_type_device, flags, 0, true);
2479	}
2480
2481	return bo;
2482 }
2483
2484 /**
2485  * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
2486  * @xe: The xe device.
2487  * @tile: The tile to select for migration of this bo, and the tile used for
2488  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2489  * @size: The storage size to use for the bo.
2490  * @start: Start of fixed VRAM range or 0.
2491  * @end: End of fixed VRAM range or ~0ULL.
2492  * @type: The TTM buffer object type.
2493  * @flags: XE_BO_FLAG_ flags.
2494  *
2495  * Create and pin an Xe BO with range restrictions. If @start and @end indicate
2496  * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
2497  * only.
2498  *
2499  * Return: The buffer object on success. Negative error pointer on failure.
2500  */
2501 struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
2502					   size_t size, u64 start, u64 end,
2503					   enum ttm_bo_type type, u32 flags)
2504 {
2505	struct xe_validation_ctx ctx;
2506	struct drm_exec exec;
2507	struct xe_bo *bo;
2508	int err = 0;
2509
2510	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
2511		bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
2512					   0, type, flags, 0, &exec);
2513		if (IS_ERR(bo)) {
2514			drm_exec_retry_on_contention(&exec);
2515			err = PTR_ERR(bo);
2516			xe_validation_retry_on_oom(&ctx, &err);
2517			break;
2518		}
2519
2520		err = xe_bo_pin(bo, &exec);
2521		xe_bo_unlock(bo);
2522		if (err) {
2523			xe_bo_put(bo);
2524			drm_exec_retry_on_contention(&exec);
2525			xe_validation_retry_on_oom(&ctx, &err);
2526			break;
2527		}
2528	}
2529
2530	return err ? ERR_PTR(err) : bo;
2531 }
2532
2533 static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
2534						     struct xe_tile *tile,
2535						     struct xe_vm *vm,
2536						     size_t size, u64 offset,
2537						     enum ttm_bo_type type, u32 flags,
2538						     u64 alignment, struct drm_exec *exec)
2539 {
2540	struct xe_bo *bo;
2541	int err;
2542	u64 start = offset == ~0ull ? 0 : offset;
2543	u64 end = offset == ~0ull ? ~0ull : start + size;
2544
2545	if (flags & XE_BO_FLAG_STOLEN &&
2546	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
2547		flags |= XE_BO_FLAG_GGTT;
2548
2549	bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
2550				   flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
2551				   alignment, exec);
2552	if (IS_ERR(bo))
2553		return bo;
2554
2555	err = xe_bo_pin(bo, exec);
2556	if (err)
2557		goto err_put;
2558
2559	err = xe_bo_vmap(bo);
2560	if (err)
2561		goto err_unpin;
2562
2563	xe_bo_unlock_vm_held(bo);
2564
2565	return bo;
2566
2567 err_unpin:
2568	xe_bo_unpin(bo);
2569 err_put:
2570	xe_bo_unlock_vm_held(bo);
2571	xe_bo_put(bo);
2572	return ERR_PTR(err);
2573 }
2574
2575 /**
2576  * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
2577  * @xe: The xe device.
2578  * @tile: The tile to select for migration of this bo, and the tile used for
2579  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2580  * @size: The storage size to use for the bo.
2581  * @offset: Optional VRAM offset or %~0ull for don't care.
2582  * @type: The TTM buffer object type.
2583  * @flags: XE_BO_FLAG_ flags.
2584  * @alignment: GGTT alignment.
2585  * @intr: Whether to execute any waits for backing store interruptible.
2586  *
2587  * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
2588  * options. The bo will be external and not associated with a VM.
2589  *
2590  * Return: The buffer object on success. Negative error pointer on failure.
2591  * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2592  * to true on entry.
2593  */
2594 struct xe_bo *
2595 xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
2596			      size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
2597			      u64 alignment, bool intr)
2598 {
2599	struct xe_validation_ctx ctx;
2600	struct drm_exec exec;
2601	struct xe_bo *bo;
2602	int ret = 0;
2603
2604	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2605			    ret) {
2606		bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
2607						     type, flags, alignment, &exec);
2608		if (IS_ERR(bo)) {
2609			drm_exec_retry_on_contention(&exec);
2610			ret = PTR_ERR(bo);
2611			xe_validation_retry_on_oom(&ctx, &ret);
2612		}
2613	}
2614
2615	return ret ? ERR_PTR(ret) : bo;
2616 }
2617
2618 /**
2619  * xe_bo_create_pin_map() - Create pinned and mapped bo
2620  * @xe: The xe device.
2621  * @tile: The tile to select for migration of this bo, and the tile used for
2622  * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2623  * @vm: The vm to associate the buffer object with. The vm's resv must be locked
2624  * with the transaction represented by @exec.
2625  * @size: The storage size to use for the bo.
2626  * @type: The TTM buffer object type.
2627  * @flags: XE_BO_FLAG_ flags.
2628  * @exec: The drm_exec transaction to use for exhaustive eviction, and
2629  * previously used for locking @vm's resv.
2630  *
2631  * Create a pinned and mapped bo. The bo will be associated with @vm if
2632  * @vm is non-NULL.
2633  *
2634  * Return: The buffer object on success. Negative error pointer on failure.
2635  * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
2636  * configured for interruptible locking.
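 *
 * A minimal usage sketch, assuming the caller provides @xe, @tile, @vm, a
 * size and a drm_exec transaction that already holds @vm's resv (all names
 * below are placeholders):
 *
 *	bo = xe_bo_create_pin_map(xe, tile, vm, SZ_4K, ttm_bo_type_kernel,
 *				  XE_BO_FLAG_VRAM_IF_DGFX(tile), exec);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 * On success, bo->vmap is valid for CPU access until xe_bo_vunmap() and
 * xe_bo_unpin() are called.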
2637 */ 2638 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, 2639 struct xe_vm *vm, size_t size, 2640 enum ttm_bo_type type, u32 flags, 2641 struct drm_exec *exec) 2642 { 2643 return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags, 2644 0, exec); 2645 } 2646 2647 /** 2648 * xe_bo_create_pin_map_novm() - Create pinned and mapped bo 2649 * @xe: The xe device. 2650 * @tile: The tile to select for migration of this bo, and the tile used for 2651 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos. 2652 * @size: The storage size to use for the bo. 2653 * @type: The TTM buffer object type. 2654 * @flags: XE_BO_FLAG_ flags. 2655 * @intr: Whether to execute any waits for backing store interruptible. 2656 * 2657 * Create a pinned and mapped bo. The bo will be external and not associated 2658 * with a VM. 2659 * 2660 * Return: The buffer object on success. Negative error pointer on failure. 2661 * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set 2662 * to true on entry. 2663 */ 2664 struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile, 2665 size_t size, enum ttm_bo_type type, u32 flags, 2666 bool intr) 2667 { 2668 return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr); 2669 } 2670 2671 static void __xe_bo_unpin_map_no_vm(void *arg) 2672 { 2673 xe_bo_unpin_map_no_vm(arg); 2674 } 2675 2676 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, 2677 size_t size, u32 flags) 2678 { 2679 struct xe_bo *bo; 2680 int ret; 2681 2682 KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags); 2683 bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true); 2684 if (IS_ERR(bo)) 2685 return bo; 2686 2687 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); 2688 if (ret) 2689 return ERR_PTR(ret); 2690 2691 return bo; 2692 } 2693 2694 void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo) 2695 { 2696 devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo); 2697 } 2698 2699 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, 2700 const void *data, size_t size, u32 flags) 2701 { 2702 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); 2703 2704 if (IS_ERR(bo)) 2705 return bo; 2706 2707 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); 2708 2709 return bo; 2710 } 2711 2712 /** 2713 * xe_managed_bo_reinit_in_vram 2714 * @xe: xe device 2715 * @tile: Tile where the new buffer will be created 2716 * @src: Managed buffer object allocated in system memory 2717 * 2718 * Replace a managed src buffer object allocated in system memory with a new 2719 * one allocated in vram, copying the data between them. 2720 * Buffer object in VRAM is not going to have the same GGTT address, the caller 2721 * is responsible for making sure that any old references to it are updated. 2722 * 2723 * Returns 0 for success, negative error code otherwise. 
2724 */ 2725 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src) 2726 { 2727 struct xe_bo *bo; 2728 u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT; 2729 2730 dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE | 2731 XE_BO_FLAG_PINNED_NORESTORE); 2732 2733 xe_assert(xe, IS_DGFX(xe)); 2734 xe_assert(xe, !(*src)->vmap.is_iomem); 2735 2736 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, 2737 xe_bo_size(*src), dst_flags); 2738 if (IS_ERR(bo)) 2739 return PTR_ERR(bo); 2740 2741 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src); 2742 *src = bo; 2743 2744 return 0; 2745 } 2746 2747 /* 2748 * XXX: This is in the VM bind data path, likely should calculate this once and 2749 * store, with a recalculation if the BO is moved. 2750 */ 2751 uint64_t vram_region_gpu_offset(struct ttm_resource *res) 2752 { 2753 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); 2754 2755 switch (res->mem_type) { 2756 case XE_PL_STOLEN: 2757 return xe_ttm_stolen_gpu_offset(xe); 2758 case XE_PL_TT: 2759 case XE_PL_SYSTEM: 2760 return 0; 2761 default: 2762 return res_to_mem_region(res)->dpa_base; 2763 } 2764 return 0; 2765 } 2766 2767 /** 2768 * xe_bo_pin_external - pin an external BO 2769 * @bo: buffer object to be pinned 2770 * @in_place: Pin in current placement, don't attempt to migrate. 2771 * @exec: The drm_exec transaction to use for exhaustive eviction. 2772 * 2773 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD) 2774 * BO. Unique call compared to xe_bo_pin as this function has it own set of 2775 * asserts and code to ensure evict / restore on suspend / resume. 2776 * 2777 * Returns 0 for success, negative error code otherwise. 2778 */ 2779 int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec) 2780 { 2781 struct xe_device *xe = xe_bo_device(bo); 2782 int err; 2783 2784 xe_assert(xe, !bo->vm); 2785 xe_assert(xe, xe_bo_is_user(bo)); 2786 2787 if (!xe_bo_is_pinned(bo)) { 2788 if (!in_place) { 2789 err = xe_bo_validate(bo, NULL, false, exec); 2790 if (err) 2791 return err; 2792 } 2793 2794 spin_lock(&xe->pinned.lock); 2795 list_add_tail(&bo->pinned_link, &xe->pinned.late.external); 2796 spin_unlock(&xe->pinned.lock); 2797 } 2798 2799 ttm_bo_pin(&bo->ttm); 2800 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2801 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); 2802 2803 /* 2804 * FIXME: If we always use the reserve / unreserve functions for locking 2805 * we do not need this. 2806 */ 2807 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); 2808 2809 return 0; 2810 } 2811 2812 /** 2813 * xe_bo_pin() - Pin a kernel bo after potentially migrating it 2814 * @bo: The kernel bo to pin. 2815 * @exec: The drm_exec transaction to use for exhaustive eviction. 2816 * 2817 * Attempts to migrate a bo to @bo->placement. If that succeeds, 2818 * pins the bo. 2819 * 2820 * Return: %0 on success, negative error code on migration failure. 2821 */ 2822 int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec) 2823 { 2824 struct ttm_place *place = &bo->placements[0]; 2825 struct xe_device *xe = xe_bo_device(bo); 2826 int err; 2827 2828 /* We currently don't expect user BO to be pinned */ 2829 xe_assert(xe, !xe_bo_is_user(bo)); 2830 2831 /* Pinned object must be in GGTT or have pinned flag */ 2832 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED | 2833 XE_BO_FLAG_GGTT)); 2834 2835 /* 2836 * No reason we can't support pinning imported dma-bufs we just don't 2837 * expect to pin an imported dma-buf. 
2838 */ 2839 xe_assert(xe, !bo->ttm.base.import_attach); 2840 2841 /* We only expect at most 1 pin */ 2842 xe_assert(xe, !xe_bo_is_pinned(bo)); 2843 2844 err = xe_bo_validate(bo, NULL, false, exec); 2845 if (err) 2846 return err; 2847 2848 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { 2849 spin_lock(&xe->pinned.lock); 2850 if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) 2851 list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present); 2852 else 2853 list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present); 2854 spin_unlock(&xe->pinned.lock); 2855 } 2856 2857 ttm_bo_pin(&bo->ttm); 2858 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2859 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm); 2860 2861 /* 2862 * FIXME: If we always use the reserve / unreserve functions for locking 2863 * we do not need this. 2864 */ 2865 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); 2866 2867 return 0; 2868 } 2869 2870 /** 2871 * xe_bo_unpin_external - unpin an external BO 2872 * @bo: buffer object to be unpinned 2873 * 2874 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD) 2875 * BO. Unique call compared to xe_bo_unpin as this function has it own set of 2876 * asserts and code to ensure evict / restore on suspend / resume. 2877 * 2878 * Returns 0 for success, negative error code otherwise. 2879 */ 2880 void xe_bo_unpin_external(struct xe_bo *bo) 2881 { 2882 struct xe_device *xe = xe_bo_device(bo); 2883 2884 xe_assert(xe, !bo->vm); 2885 xe_assert(xe, xe_bo_is_pinned(bo)); 2886 xe_assert(xe, xe_bo_is_user(bo)); 2887 2888 spin_lock(&xe->pinned.lock); 2889 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) 2890 list_del_init(&bo->pinned_link); 2891 spin_unlock(&xe->pinned.lock); 2892 2893 ttm_bo_unpin(&bo->ttm); 2894 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2895 xe_ttm_tt_account_add(xe, bo->ttm.ttm); 2896 2897 /* 2898 * FIXME: If we always use the reserve / unreserve functions for locking 2899 * we do not need this. 2900 */ 2901 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm); 2902 } 2903 2904 void xe_bo_unpin(struct xe_bo *bo) 2905 { 2906 struct ttm_place *place = &bo->placements[0]; 2907 struct xe_device *xe = xe_bo_device(bo); 2908 2909 xe_assert(xe, !bo->ttm.base.import_attach); 2910 xe_assert(xe, xe_bo_is_pinned(bo)); 2911 2912 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) { 2913 spin_lock(&xe->pinned.lock); 2914 xe_assert(xe, !list_empty(&bo->pinned_link)); 2915 list_del_init(&bo->pinned_link); 2916 spin_unlock(&xe->pinned.lock); 2917 2918 if (bo->backup_obj) { 2919 if (xe_bo_is_pinned(bo->backup_obj)) 2920 ttm_bo_unpin(&bo->backup_obj->ttm); 2921 xe_bo_put(bo->backup_obj); 2922 bo->backup_obj = NULL; 2923 } 2924 } 2925 ttm_bo_unpin(&bo->ttm); 2926 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) 2927 xe_ttm_tt_account_add(xe, bo->ttm.ttm); 2928 } 2929 2930 /** 2931 * xe_bo_validate() - Make sure the bo is in an allowed placement 2932 * @bo: The bo, 2933 * @vm: Pointer to a the vm the bo shares a locked dma_resv object with, or 2934 * NULL. Used together with @allow_res_evict. 2935 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's 2936 * reservation object. 2937 * @exec: The drm_exec transaction to use for exhaustive eviction. 2938 * 2939 * Make sure the bo is in allowed placement, migrating it if necessary. If 2940 * needed, other bos will be evicted. 
If bos selected for eviction shares 2941 * the @vm's reservation object, they can be evicted iff @allow_res_evict is 2942 * set to true, otherwise they will be bypassed. 2943 * 2944 * Return: 0 on success, negative error code on failure. May return 2945 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal. 2946 */ 2947 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict, 2948 struct drm_exec *exec) 2949 { 2950 struct ttm_operation_ctx ctx = { 2951 .interruptible = true, 2952 .no_wait_gpu = false, 2953 .gfp_retry_mayfail = true, 2954 }; 2955 int ret; 2956 2957 if (xe_bo_is_pinned(bo)) 2958 return 0; 2959 2960 if (vm) { 2961 lockdep_assert_held(&vm->lock); 2962 xe_vm_assert_held(vm); 2963 2964 ctx.allow_res_evict = allow_res_evict; 2965 ctx.resv = xe_vm_resv(vm); 2966 } 2967 2968 xe_vm_set_validating(vm, allow_res_evict); 2969 trace_xe_bo_validate(bo); 2970 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base); 2971 ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); 2972 xe_vm_clear_validating(vm, allow_res_evict); 2973 2974 return ret; 2975 } 2976 2977 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo) 2978 { 2979 if (bo->destroy == &xe_ttm_bo_destroy) 2980 return true; 2981 2982 return false; 2983 } 2984 2985 /* 2986 * Resolve a BO address. There is no assert to check if the proper lock is held 2987 * so it should only be used in cases where it is not fatal to get the wrong 2988 * address, such as printing debug information, but not in cases where memory is 2989 * written based on this result. 2990 */ 2991 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) 2992 { 2993 struct xe_device *xe = xe_bo_device(bo); 2994 struct xe_res_cursor cur; 2995 u64 page; 2996 2997 xe_assert(xe, page_size <= PAGE_SIZE); 2998 page = offset >> PAGE_SHIFT; 2999 offset &= (PAGE_SIZE - 1); 3000 3001 if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { 3002 xe_assert(xe, bo->ttm.ttm); 3003 3004 xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT, 3005 page_size, &cur); 3006 return xe_res_dma(&cur) + offset; 3007 } else { 3008 struct xe_res_cursor cur; 3009 3010 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, 3011 page_size, &cur); 3012 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); 3013 } 3014 } 3015 3016 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) 3017 { 3018 if (!READ_ONCE(bo->ttm.pin_count)) 3019 xe_bo_assert_held(bo); 3020 return __xe_bo_addr(bo, offset, page_size); 3021 } 3022 3023 int xe_bo_vmap(struct xe_bo *bo) 3024 { 3025 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); 3026 void *virtual; 3027 bool is_iomem; 3028 int ret; 3029 3030 xe_bo_assert_held(bo); 3031 3032 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) || 3033 !force_contiguous(bo->flags))) 3034 return -EINVAL; 3035 3036 if (!iosys_map_is_null(&bo->vmap)) 3037 return 0; 3038 3039 /* 3040 * We use this more or less deprecated interface for now since 3041 * ttm_bo_vmap() doesn't offer the optimization of kmapping 3042 * single page bos, which is done here. 3043 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap 3044 * to use struct iosys_map. 
3045 */ 3046 ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap); 3047 if (ret) 3048 return ret; 3049 3050 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 3051 if (is_iomem) 3052 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); 3053 else 3054 iosys_map_set_vaddr(&bo->vmap, virtual); 3055 3056 return 0; 3057 } 3058 3059 static void __xe_bo_vunmap(struct xe_bo *bo) 3060 { 3061 if (!iosys_map_is_null(&bo->vmap)) { 3062 iosys_map_clear(&bo->vmap); 3063 ttm_bo_kunmap(&bo->kmap); 3064 } 3065 } 3066 3067 void xe_bo_vunmap(struct xe_bo *bo) 3068 { 3069 xe_bo_assert_held(bo); 3070 __xe_bo_vunmap(bo); 3071 } 3072 3073 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value) 3074 { 3075 if (value == DRM_XE_PXP_TYPE_NONE) 3076 return 0; 3077 3078 /* we only support DRM_XE_PXP_TYPE_HWDRM for now */ 3079 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM)) 3080 return -EINVAL; 3081 3082 return xe_pxp_key_assign(xe->pxp, bo); 3083 } 3084 3085 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe, 3086 struct xe_bo *bo, 3087 u64 value); 3088 3089 static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = { 3090 [DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type, 3091 }; 3092 3093 static int gem_create_user_ext_set_property(struct xe_device *xe, 3094 struct xe_bo *bo, 3095 u64 extension) 3096 { 3097 u64 __user *address = u64_to_user_ptr(extension); 3098 struct drm_xe_ext_set_property ext; 3099 int err; 3100 u32 idx; 3101 3102 err = copy_from_user(&ext, address, sizeof(ext)); 3103 if (XE_IOCTL_DBG(xe, err)) 3104 return -EFAULT; 3105 3106 if (XE_IOCTL_DBG(xe, ext.property >= 3107 ARRAY_SIZE(gem_create_set_property_funcs)) || 3108 XE_IOCTL_DBG(xe, ext.pad) || 3109 XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY)) 3110 return -EINVAL; 3111 3112 idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs)); 3113 if (!gem_create_set_property_funcs[idx]) 3114 return -EINVAL; 3115 3116 return gem_create_set_property_funcs[idx](xe, bo, ext.value); 3117 } 3118 3119 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe, 3120 struct xe_bo *bo, 3121 u64 extension); 3122 3123 static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = { 3124 [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property, 3125 }; 3126 3127 #define MAX_USER_EXTENSIONS 16 3128 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo, 3129 u64 extensions, int ext_number) 3130 { 3131 u64 __user *address = u64_to_user_ptr(extensions); 3132 struct drm_xe_user_extension ext; 3133 int err; 3134 u32 idx; 3135 3136 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS)) 3137 return -E2BIG; 3138 3139 err = copy_from_user(&ext, address, sizeof(ext)); 3140 if (XE_IOCTL_DBG(xe, err)) 3141 return -EFAULT; 3142 3143 if (XE_IOCTL_DBG(xe, ext.pad) || 3144 XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs))) 3145 return -EINVAL; 3146 3147 idx = array_index_nospec(ext.name, 3148 ARRAY_SIZE(gem_create_user_extension_funcs)); 3149 err = gem_create_user_extension_funcs[idx](xe, bo, extensions); 3150 if (XE_IOCTL_DBG(xe, err)) 3151 return err; 3152 3153 if (ext.next_extension) 3154 return gem_create_user_extensions(xe, bo, ext.next_extension, 3155 ++ext_number); 3156 3157 return 0; 3158 } 3159 3160 int xe_gem_create_ioctl(struct drm_device *dev, void *data, 3161 struct drm_file *file) 3162 { 3163 struct 
xe_device *xe = to_xe_device(dev); 3164 struct xe_file *xef = to_xe_file(file); 3165 struct drm_xe_gem_create *args = data; 3166 struct xe_validation_ctx ctx; 3167 struct drm_exec exec; 3168 struct xe_vm *vm = NULL; 3169 struct xe_bo *bo; 3170 unsigned int bo_flags; 3171 u32 handle; 3172 int err; 3173 3174 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || 3175 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 3176 return -EINVAL; 3177 3178 /* at least one valid memory placement must be specified */ 3179 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || 3180 !args->placement)) 3181 return -EINVAL; 3182 3183 if (XE_IOCTL_DBG(xe, args->flags & 3184 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING | 3185 DRM_XE_GEM_CREATE_FLAG_SCANOUT | 3186 DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM))) 3187 return -EINVAL; 3188 3189 if (XE_IOCTL_DBG(xe, args->handle)) 3190 return -EINVAL; 3191 3192 if (XE_IOCTL_DBG(xe, !args->size)) 3193 return -EINVAL; 3194 3195 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) 3196 return -EINVAL; 3197 3198 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) 3199 return -EINVAL; 3200 3201 bo_flags = 0; 3202 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) 3203 bo_flags |= XE_BO_FLAG_DEFER_BACKING; 3204 3205 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) 3206 bo_flags |= XE_BO_FLAG_SCANOUT; 3207 3208 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); 3209 3210 /* CCS formats need physical placement at a 64K alignment in VRAM. */ 3211 if ((bo_flags & XE_BO_FLAG_VRAM_MASK) && 3212 (bo_flags & XE_BO_FLAG_SCANOUT) && 3213 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) && 3214 IS_ALIGNED(args->size, SZ_64K)) 3215 bo_flags |= XE_BO_FLAG_NEEDS_64K; 3216 3217 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) { 3218 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK))) 3219 return -EINVAL; 3220 3221 bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS; 3222 } 3223 3224 if (XE_IOCTL_DBG(xe, !args->cpu_caching || 3225 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC)) 3226 return -EINVAL; 3227 3228 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK && 3229 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC)) 3230 return -EINVAL; 3231 3232 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT && 3233 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) 3234 return -EINVAL; 3235 3236 if (args->vm_id) { 3237 vm = xe_vm_lookup(xef, args->vm_id); 3238 if (XE_IOCTL_DBG(xe, !vm)) 3239 return -ENOENT; 3240 } 3241 3242 err = 0; 3243 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true}, 3244 err) { 3245 if (vm) { 3246 err = xe_vm_drm_exec_lock(vm, &exec); 3247 drm_exec_retry_on_contention(&exec); 3248 if (err) 3249 break; 3250 } 3251 bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching, 3252 bo_flags, &exec); 3253 drm_exec_retry_on_contention(&exec); 3254 if (IS_ERR(bo)) { 3255 err = PTR_ERR(bo); 3256 xe_validation_retry_on_oom(&ctx, &err); 3257 break; 3258 } 3259 } 3260 if (err) 3261 goto out_vm; 3262 3263 if (args->extensions) { 3264 err = gem_create_user_extensions(xe, bo, args->extensions, 0); 3265 if (err) 3266 goto out_bulk; 3267 } 3268 3269 err = drm_gem_handle_create(file, &bo->ttm.base, &handle); 3270 if (err) 3271 goto out_bulk; 3272 3273 args->handle = handle; 3274 goto out_put; 3275 3276 out_bulk: 3277 if (vm && !xe_vm_in_fault_mode(vm)) { 3278 xe_vm_lock(vm, false); 3279 __xe_bo_unset_bulk_move(bo); 3280 xe_vm_unlock(vm); 3281 } 3282 out_put: 3283 xe_bo_put(bo); 3284 out_vm: 3285 if (vm) 3286 
xe_vm_put(vm); 3287 3288 return err; 3289 } 3290 3291 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data, 3292 struct drm_file *file) 3293 { 3294 struct xe_device *xe = to_xe_device(dev); 3295 struct drm_xe_gem_mmap_offset *args = data; 3296 struct drm_gem_object *gem_obj; 3297 3298 if (XE_IOCTL_DBG(xe, args->extensions) || 3299 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 3300 return -EINVAL; 3301 3302 if (XE_IOCTL_DBG(xe, args->flags & 3303 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER)) 3304 return -EINVAL; 3305 3306 if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) { 3307 if (XE_IOCTL_DBG(xe, !IS_DGFX(xe))) 3308 return -EINVAL; 3309 3310 if (XE_IOCTL_DBG(xe, args->handle)) 3311 return -EINVAL; 3312 3313 if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K)) 3314 return -EINVAL; 3315 3316 BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) + 3317 SZ_4K) >= DRM_FILE_PAGE_OFFSET_START); 3318 args->offset = XE_PCI_BARRIER_MMAP_OFFSET; 3319 return 0; 3320 } 3321 3322 gem_obj = drm_gem_object_lookup(file, args->handle); 3323 if (XE_IOCTL_DBG(xe, !gem_obj)) 3324 return -ENOENT; 3325 3326 /* The mmap offset was set up at BO allocation time. */ 3327 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 3328 3329 xe_bo_put(gem_to_xe_bo(gem_obj)); 3330 return 0; 3331 } 3332 3333 /** 3334 * xe_bo_lock() - Lock the buffer object's dma_resv object 3335 * @bo: The struct xe_bo whose lock is to be taken 3336 * @intr: Whether to perform any wait interruptible 3337 * 3338 * Locks the buffer object's dma_resv object. If the buffer object is 3339 * pointing to a shared dma_resv object, that shared lock is locked. 3340 * 3341 * Return: 0 on success, -EINTR if @intr is true and the wait for a 3342 * contended lock was interrupted. If @intr is set to false, the 3343 * function always returns 0. 3344 */ 3345 int xe_bo_lock(struct xe_bo *bo, bool intr) 3346 { 3347 if (intr) 3348 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL); 3349 3350 dma_resv_lock(bo->ttm.base.resv, NULL); 3351 3352 return 0; 3353 } 3354 3355 /** 3356 * xe_bo_unlock() - Unlock the buffer object's dma_resv object 3357 * @bo: The struct xe_bo whose lock is to be released. 3358 * 3359 * Unlock a buffer object lock that was locked by xe_bo_lock(). 3360 */ 3361 void xe_bo_unlock(struct xe_bo *bo) 3362 { 3363 dma_resv_unlock(bo->ttm.base.resv); 3364 } 3365 3366 /** 3367 * xe_bo_can_migrate - Whether a buffer object likely can be migrated 3368 * @bo: The buffer object to migrate 3369 * @mem_type: The TTM memory type intended to migrate to 3370 * 3371 * Check whether the buffer object supports migration to the 3372 * given memory type. Note that pinning may affect the ability to migrate as 3373 * returned by this function. 3374 * 3375 * This function is primarily intended as a helper for checking the 3376 * possibility to migrate buffer objects and can be called without 3377 * the object lock held. 3378 * 3379 * Return: true if migration is possible, false otherwise. 
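 *
 * Example of the intended use, for a hypothetical caller probing whether a
 * migration to system memory is worth attempting before committing to it:
 *
 *	if (xe_bo_can_migrate(bo, XE_PL_TT))
 *		err = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
 *
 * where @bo must be locked before the xe_bo_migrate() call, as documented
 * for that function below.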
3380 */ 3381 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type) 3382 { 3383 unsigned int cur_place; 3384 3385 if (bo->ttm.type == ttm_bo_type_kernel) 3386 return true; 3387 3388 if (bo->ttm.type == ttm_bo_type_sg) 3389 return false; 3390 3391 for (cur_place = 0; cur_place < bo->placement.num_placement; 3392 cur_place++) { 3393 if (bo->placements[cur_place].mem_type == mem_type) 3394 return true; 3395 } 3396 3397 return false; 3398 } 3399 3400 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place) 3401 { 3402 memset(place, 0, sizeof(*place)); 3403 place->mem_type = mem_type; 3404 } 3405 3406 /** 3407 * xe_bo_migrate - Migrate an object to the desired region id 3408 * @bo: The buffer object to migrate. 3409 * @mem_type: The TTM region type to migrate to. 3410 * @tctx: A pointer to a struct ttm_operation_ctx or NULL if 3411 * a default interruptibe ctx is to be used. 3412 * @exec: The drm_exec transaction to use for exhaustive eviction. 3413 * 3414 * Attempt to migrate the buffer object to the desired memory region. The 3415 * buffer object may not be pinned, and must be locked. 3416 * On successful completion, the object memory type will be updated, 3417 * but an async migration task may not have completed yet, and to 3418 * accomplish that, the object's kernel fences must be signaled with 3419 * the object lock held. 3420 * 3421 * Return: 0 on success. Negative error code on failure. In particular may 3422 * return -EINTR or -ERESTARTSYS if signal pending. 3423 */ 3424 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx, 3425 struct drm_exec *exec) 3426 { 3427 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev); 3428 struct ttm_operation_ctx ctx = { 3429 .interruptible = true, 3430 .no_wait_gpu = false, 3431 .gfp_retry_mayfail = true, 3432 }; 3433 struct ttm_placement placement; 3434 struct ttm_place requested; 3435 3436 xe_bo_assert_held(bo); 3437 tctx = tctx ? tctx : &ctx; 3438 3439 if (bo->ttm.resource->mem_type == mem_type) 3440 return 0; 3441 3442 if (xe_bo_is_pinned(bo)) 3443 return -EBUSY; 3444 3445 if (!xe_bo_can_migrate(bo, mem_type)) 3446 return -EINVAL; 3447 3448 xe_place_from_ttm_type(mem_type, &requested); 3449 placement.num_placement = 1; 3450 placement.placement = &requested; 3451 3452 /* 3453 * Stolen needs to be handled like below VRAM handling if we ever need 3454 * to support it. 3455 */ 3456 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN); 3457 3458 if (mem_type_is_vram(mem_type)) { 3459 u32 c = 0; 3460 3461 add_vram(xe, bo, &requested, bo->flags, mem_type, &c); 3462 } 3463 3464 if (!tctx->no_wait_gpu) 3465 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base); 3466 return ttm_bo_validate(&bo->ttm, &placement, tctx); 3467 } 3468 3469 /** 3470 * xe_bo_evict - Evict an object to evict placement 3471 * @bo: The buffer object to migrate. 3472 * @exec: The drm_exec transaction to use for exhaustive eviction. 3473 * 3474 * On successful completion, the object memory will be moved to evict 3475 * placement. This function blocks until the object has been fully moved. 3476 * 3477 * Return: 0 on success. Negative error code on failure. 
3478 */ 3479 int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec) 3480 { 3481 struct ttm_operation_ctx ctx = { 3482 .interruptible = false, 3483 .no_wait_gpu = false, 3484 .gfp_retry_mayfail = true, 3485 }; 3486 struct ttm_placement placement; 3487 int ret; 3488 3489 xe_evict_flags(&bo->ttm, &placement); 3490 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx); 3491 if (ret) 3492 return ret; 3493 3494 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL, 3495 false, MAX_SCHEDULE_TIMEOUT); 3496 3497 return 0; 3498 } 3499 3500 /** 3501 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when 3502 * placed in system memory. 3503 * @bo: The xe_bo 3504 * 3505 * Return: true if extra pages need to be allocated, false otherwise. 3506 */ 3507 bool xe_bo_needs_ccs_pages(struct xe_bo *bo) 3508 { 3509 struct xe_device *xe = xe_bo_device(bo); 3510 3511 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)) 3512 return false; 3513 3514 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device) 3515 return false; 3516 3517 /* On discrete GPUs, if the GPU can access this buffer from 3518 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS 3519 * can't be used since there's no CCS storage associated with 3520 * non-VRAM addresses. 3521 */ 3522 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM)) 3523 return false; 3524 3525 /* 3526 * Compression implies coh_none, therefore we know for sure that WB 3527 * memory can't currently use compression, which is likely one of the 3528 * common cases. 3529 */ 3530 if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB) 3531 return false; 3532 3533 return true; 3534 } 3535 3536 /** 3537 * __xe_bo_release_dummy() - Dummy kref release function 3538 * @kref: The embedded struct kref. 3539 * 3540 * Dummy release function for xe_bo_put_deferred(). Keep off. 3541 */ 3542 void __xe_bo_release_dummy(struct kref *kref) 3543 { 3544 } 3545 3546 /** 3547 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred(). 3548 * @deferred: The lockless list used for the call to xe_bo_put_deferred(). 3549 * 3550 * Puts all bos whose put was deferred by xe_bo_put_deferred(). 3551 * The @deferred list can be either an onstack local list or a global 3552 * shared list used by a workqueue. 
3553 */ 3554 void xe_bo_put_commit(struct llist_head *deferred) 3555 { 3556 struct llist_node *freed; 3557 struct xe_bo *bo, *next; 3558 3559 if (!deferred) 3560 return; 3561 3562 freed = llist_del_all(deferred); 3563 if (!freed) 3564 return; 3565 3566 llist_for_each_entry_safe(bo, next, freed, freed) 3567 drm_gem_object_free(&bo->ttm.base.refcount); 3568 } 3569 3570 static void xe_bo_dev_work_func(struct work_struct *work) 3571 { 3572 struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free); 3573 3574 xe_bo_put_commit(&bo_dev->async_list); 3575 } 3576 3577 /** 3578 * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing 3579 * @bo_dev: The BO dev structure 3580 */ 3581 void xe_bo_dev_init(struct xe_bo_dev *bo_dev) 3582 { 3583 INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func); 3584 } 3585 3586 /** 3587 * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing 3588 * @bo_dev: The BO dev structure 3589 */ 3590 void xe_bo_dev_fini(struct xe_bo_dev *bo_dev) 3591 { 3592 flush_work(&bo_dev->async_free); 3593 } 3594 3595 void xe_bo_put(struct xe_bo *bo) 3596 { 3597 struct xe_tile *tile; 3598 u8 id; 3599 3600 might_sleep(); 3601 if (bo) { 3602 #ifdef CONFIG_PROC_FS 3603 if (bo->client) 3604 might_lock(&bo->client->bos_lock); 3605 #endif 3606 for_each_tile(tile, xe_bo_device(bo), id) 3607 if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) 3608 xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt); 3609 drm_gem_object_put(&bo->ttm.base); 3610 } 3611 } 3612 3613 /** 3614 * xe_bo_dumb_create - Create a dumb bo as backing for a fb 3615 * @file_priv: ... 3616 * @dev: ... 3617 * @args: ... 3618 * 3619 * See dumb_create() hook in include/drm/drm_drv.h 3620 * 3621 * Return: ... 3622 */ 3623 int xe_bo_dumb_create(struct drm_file *file_priv, 3624 struct drm_device *dev, 3625 struct drm_mode_create_dumb *args) 3626 { 3627 struct xe_device *xe = to_xe_device(dev); 3628 struct xe_bo *bo; 3629 uint32_t handle; 3630 int err; 3631 u32 page_size = max_t(u32, PAGE_SIZE, 3632 xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K); 3633 3634 err = drm_mode_size_dumb(dev, args, SZ_64, page_size); 3635 if (err) 3636 return err; 3637 3638 bo = xe_bo_create_user(xe, NULL, args->size, 3639 DRM_XE_GEM_CPU_CACHING_WC, 3640 XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | 3641 XE_BO_FLAG_SCANOUT | 3642 XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL); 3643 if (IS_ERR(bo)) 3644 return PTR_ERR(bo); 3645 3646 err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle); 3647 /* drop reference from allocate - handle holds it now */ 3648 drm_gem_object_put(&bo->ttm.base); 3649 if (!err) 3650 args->handle = handle; 3651 return err; 3652 } 3653 3654 void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo) 3655 { 3656 struct ttm_buffer_object *tbo = &bo->ttm; 3657 struct ttm_device *bdev = tbo->bdev; 3658 3659 drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping); 3660 3661 list_del_init(&bo->vram_userfault_link); 3662 } 3663 3664 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) 3665 #include "tests/xe_bo.c" 3666 #endif 3667