1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2021 Intel Corporation 4 */ 5 6 #include "xe_bo.h" 7 8 #include <linux/dma-buf.h> 9 10 #include <drm/drm_drv.h> 11 #include <drm/drm_gem_ttm_helper.h> 12 #include <drm/drm_managed.h> 13 #include <drm/ttm/ttm_device.h> 14 #include <drm/ttm/ttm_placement.h> 15 #include <drm/ttm/ttm_tt.h> 16 #include <uapi/drm/xe_drm.h> 17 18 #include "xe_device.h" 19 #include "xe_dma_buf.h" 20 #include "xe_drm_client.h" 21 #include "xe_ggtt.h" 22 #include "xe_gt.h" 23 #include "xe_map.h" 24 #include "xe_migrate.h" 25 #include "xe_pm.h" 26 #include "xe_preempt_fence.h" 27 #include "xe_res_cursor.h" 28 #include "xe_trace_bo.h" 29 #include "xe_ttm_stolen_mgr.h" 30 #include "xe_vm.h" 31 32 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = { 33 [XE_PL_SYSTEM] = "system", 34 [XE_PL_TT] = "gtt", 35 [XE_PL_VRAM0] = "vram0", 36 [XE_PL_VRAM1] = "vram1", 37 [XE_PL_STOLEN] = "stolen" 38 }; 39 40 static const struct ttm_place sys_placement_flags = { 41 .fpfn = 0, 42 .lpfn = 0, 43 .mem_type = XE_PL_SYSTEM, 44 .flags = 0, 45 }; 46 47 static struct ttm_placement sys_placement = { 48 .num_placement = 1, 49 .placement = &sys_placement_flags, 50 }; 51 52 static const struct ttm_place tt_placement_flags[] = { 53 { 54 .fpfn = 0, 55 .lpfn = 0, 56 .mem_type = XE_PL_TT, 57 .flags = TTM_PL_FLAG_DESIRED, 58 }, 59 { 60 .fpfn = 0, 61 .lpfn = 0, 62 .mem_type = XE_PL_SYSTEM, 63 .flags = TTM_PL_FLAG_FALLBACK, 64 } 65 }; 66 67 static struct ttm_placement tt_placement = { 68 .num_placement = 2, 69 .placement = tt_placement_flags, 70 }; 71 72 bool mem_type_is_vram(u32 mem_type) 73 { 74 return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN; 75 } 76 77 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res) 78 { 79 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe); 80 } 81 82 static bool resource_is_vram(struct ttm_resource *res) 83 { 84 return mem_type_is_vram(res->mem_type); 85 } 86 87 bool xe_bo_is_vram(struct xe_bo *bo) 88 { 89 return resource_is_vram(bo->ttm.resource) || 90 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource); 91 } 92 93 bool xe_bo_is_stolen(struct xe_bo *bo) 94 { 95 return bo->ttm.resource->mem_type == XE_PL_STOLEN; 96 } 97 98 /** 99 * xe_bo_has_single_placement - check if BO is placed only in one memory location 100 * @bo: The BO 101 * 102 * This function checks whether a given BO is placed in only one memory location. 103 * 104 * Returns: true if the BO is placed in a single memory location, false otherwise. 105 * 106 */ 107 bool xe_bo_has_single_placement(struct xe_bo *bo) 108 { 109 return bo->placement.num_placement == 1; 110 } 111 112 /** 113 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR 114 * @bo: The BO 115 * 116 * The stolen memory is accessed through the PCI BAR for both DGFX and some 117 * integrated platforms that have a dedicated bit in the PTE for devmem (DM). 118 * 119 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise. 120 */ 121 bool xe_bo_is_stolen_devmem(struct xe_bo *bo) 122 { 123 return xe_bo_is_stolen(bo) && 124 GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270; 125 } 126 127 static bool xe_bo_is_user(struct xe_bo *bo) 128 { 129 return bo->flags & XE_BO_FLAG_USER; 130 } 131 132 static struct xe_migrate * 133 mem_type_to_migrate(struct xe_device *xe, u32 mem_type) 134 { 135 struct xe_tile *tile; 136 137 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type)); 138 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 
0 : (mem_type - XE_PL_VRAM0)]; 139 return tile->migrate; 140 } 141 142 static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res) 143 { 144 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev); 145 struct ttm_resource_manager *mgr; 146 147 xe_assert(xe, resource_is_vram(res)); 148 mgr = ttm_manager_type(&xe->ttm, res->mem_type); 149 return to_xe_ttm_vram_mgr(mgr)->vram; 150 } 151 152 static void try_add_system(struct xe_device *xe, struct xe_bo *bo, 153 u32 bo_flags, u32 *c) 154 { 155 if (bo_flags & XE_BO_FLAG_SYSTEM) { 156 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 157 158 bo->placements[*c] = (struct ttm_place) { 159 .mem_type = XE_PL_TT, 160 }; 161 *c += 1; 162 } 163 } 164 165 static void add_vram(struct xe_device *xe, struct xe_bo *bo, 166 struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c) 167 { 168 struct ttm_place place = { .mem_type = mem_type }; 169 struct xe_mem_region *vram; 170 u64 io_size; 171 172 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 173 174 vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram; 175 xe_assert(xe, vram && vram->usable_size); 176 io_size = vram->io_size; 177 178 /* 179 * For eviction / restore on suspend / resume objects 180 * pinned in VRAM must be contiguous 181 */ 182 if (bo_flags & (XE_BO_FLAG_PINNED | 183 XE_BO_FLAG_GGTT)) 184 place.flags |= TTM_PL_FLAG_CONTIGUOUS; 185 186 if (io_size < vram->usable_size) { 187 if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) { 188 place.fpfn = 0; 189 place.lpfn = io_size >> PAGE_SHIFT; 190 } else { 191 place.flags |= TTM_PL_FLAG_TOPDOWN; 192 } 193 } 194 places[*c] = place; 195 *c += 1; 196 } 197 198 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo, 199 u32 bo_flags, u32 *c) 200 { 201 if (bo_flags & XE_BO_FLAG_VRAM0) 202 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c); 203 if (bo_flags & XE_BO_FLAG_VRAM1) 204 add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c); 205 } 206 207 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo, 208 u32 bo_flags, u32 *c) 209 { 210 if (bo_flags & XE_BO_FLAG_STOLEN) { 211 xe_assert(xe, *c < ARRAY_SIZE(bo->placements)); 212 213 bo->placements[*c] = (struct ttm_place) { 214 .mem_type = XE_PL_STOLEN, 215 .flags = bo_flags & (XE_BO_FLAG_PINNED | 216 XE_BO_FLAG_GGTT) ? 217 TTM_PL_FLAG_CONTIGUOUS : 0, 218 }; 219 *c += 1; 220 } 221 } 222 223 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, 224 u32 bo_flags) 225 { 226 u32 c = 0; 227 228 try_add_vram(xe, bo, bo_flags, &c); 229 try_add_system(xe, bo, bo_flags, &c); 230 try_add_stolen(xe, bo, bo_flags, &c); 231 232 if (!c) 233 return -EINVAL; 234 235 bo->placement = (struct ttm_placement) { 236 .num_placement = c, 237 .placement = bo->placements, 238 }; 239 240 return 0; 241 } 242 243 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo, 244 u32 bo_flags) 245 { 246 xe_bo_assert_held(bo); 247 return __xe_bo_placement_for_flags(xe, bo, bo_flags); 248 } 249 250 static void xe_evict_flags(struct ttm_buffer_object *tbo, 251 struct ttm_placement *placement) 252 { 253 if (!xe_bo_is_xe_bo(tbo)) { 254 /* Don't handle scatter gather BOs */ 255 if (tbo->type == ttm_bo_type_sg) { 256 placement->num_placement = 0; 257 return; 258 } 259 260 *placement = sys_placement; 261 return; 262 } 263 264 /* 265 * For xe, sg bos that are evicted to system just triggers a 266 * rebind of the sg list upon subsequent validation to XE_PL_TT. 
267 */ 268 switch (tbo->resource->mem_type) { 269 case XE_PL_VRAM0: 270 case XE_PL_VRAM1: 271 case XE_PL_STOLEN: 272 *placement = tt_placement; 273 break; 274 case XE_PL_TT: 275 default: 276 *placement = sys_placement; 277 break; 278 } 279 } 280 281 struct xe_ttm_tt { 282 struct ttm_tt ttm; 283 struct device *dev; 284 struct sg_table sgt; 285 struct sg_table *sg; 286 }; 287 288 static int xe_tt_map_sg(struct ttm_tt *tt) 289 { 290 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 291 unsigned long num_pages = tt->num_pages; 292 int ret; 293 294 XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL); 295 296 if (xe_tt->sg) 297 return 0; 298 299 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages, 300 num_pages, 0, 301 (u64)num_pages << PAGE_SHIFT, 302 xe_sg_segment_size(xe_tt->dev), 303 GFP_KERNEL); 304 if (ret) 305 return ret; 306 307 xe_tt->sg = &xe_tt->sgt; 308 ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL, 309 DMA_ATTR_SKIP_CPU_SYNC); 310 if (ret) { 311 sg_free_table(xe_tt->sg); 312 xe_tt->sg = NULL; 313 return ret; 314 } 315 316 return 0; 317 } 318 319 static void xe_tt_unmap_sg(struct ttm_tt *tt) 320 { 321 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 322 323 if (xe_tt->sg) { 324 dma_unmap_sgtable(xe_tt->dev, xe_tt->sg, 325 DMA_BIDIRECTIONAL, 0); 326 sg_free_table(xe_tt->sg); 327 xe_tt->sg = NULL; 328 } 329 } 330 331 struct sg_table *xe_bo_sg(struct xe_bo *bo) 332 { 333 struct ttm_tt *tt = bo->ttm.ttm; 334 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm); 335 336 return xe_tt->sg; 337 } 338 339 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, 340 u32 page_flags) 341 { 342 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 343 struct xe_device *xe = xe_bo_device(bo); 344 struct xe_ttm_tt *tt; 345 unsigned long extra_pages; 346 enum ttm_caching caching = ttm_cached; 347 int err; 348 349 tt = kzalloc(sizeof(*tt), GFP_KERNEL); 350 if (!tt) 351 return NULL; 352 353 tt->dev = xe->drm.dev; 354 355 extra_pages = 0; 356 if (xe_bo_needs_ccs_pages(bo)) 357 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), 358 PAGE_SIZE); 359 360 /* 361 * DGFX system memory is always WB / ttm_cached, since 362 * other caching modes are only supported on x86. DGFX 363 * GPU system memory accesses are always coherent with the 364 * CPU. 365 */ 366 if (!IS_DGFX(xe)) { 367 switch (bo->cpu_caching) { 368 case DRM_XE_GEM_CPU_CACHING_WC: 369 caching = ttm_write_combined; 370 break; 371 default: 372 caching = ttm_cached; 373 break; 374 } 375 376 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching); 377 378 /* 379 * Display scanout is always non-coherent with the CPU cache. 380 * 381 * For Xe_LPG and beyond, PPGTT PTE lookups are also 382 * non-coherent and require a CPU:WC mapping. 383 */ 384 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) || 385 (xe->info.graphics_verx100 >= 1270 && 386 bo->flags & XE_BO_FLAG_PAGETABLE)) 387 caching = ttm_write_combined; 388 } 389 390 if (bo->flags & XE_BO_FLAG_NEEDS_UC) { 391 /* 392 * Valid only for internally-created buffers only, for 393 * which cpu_caching is never initialized. 
394 */ 395 xe_assert(xe, bo->cpu_caching == 0); 396 caching = ttm_uncached; 397 } 398 399 err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages); 400 if (err) { 401 kfree(tt); 402 return NULL; 403 } 404 405 return &tt->ttm; 406 } 407 408 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, 409 struct ttm_operation_ctx *ctx) 410 { 411 int err; 412 413 /* 414 * dma-bufs are not populated with pages, and the dma- 415 * addresses are set up when moved to XE_PL_TT. 416 */ 417 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) 418 return 0; 419 420 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx); 421 if (err) 422 return err; 423 424 return err; 425 } 426 427 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) 428 { 429 if (tt->page_flags & TTM_TT_FLAG_EXTERNAL) 430 return; 431 432 xe_tt_unmap_sg(tt); 433 434 return ttm_pool_free(&ttm_dev->pool, tt); 435 } 436 437 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) 438 { 439 ttm_tt_fini(tt); 440 kfree(tt); 441 } 442 443 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev, 444 struct ttm_resource *mem) 445 { 446 struct xe_device *xe = ttm_to_xe_device(bdev); 447 448 switch (mem->mem_type) { 449 case XE_PL_SYSTEM: 450 case XE_PL_TT: 451 return 0; 452 case XE_PL_VRAM0: 453 case XE_PL_VRAM1: { 454 struct xe_ttm_vram_mgr_resource *vres = 455 to_xe_ttm_vram_mgr_resource(mem); 456 struct xe_mem_region *vram = res_to_mem_region(mem); 457 458 if (vres->used_visible_size < mem->size) 459 return -EINVAL; 460 461 mem->bus.offset = mem->start << PAGE_SHIFT; 462 463 if (vram->mapping && 464 mem->placement & TTM_PL_FLAG_CONTIGUOUS) 465 mem->bus.addr = (u8 __force *)vram->mapping + 466 mem->bus.offset; 467 468 mem->bus.offset += vram->io_start; 469 mem->bus.is_iomem = true; 470 471 #if !defined(CONFIG_X86) 472 mem->bus.caching = ttm_write_combined; 473 #endif 474 return 0; 475 } case XE_PL_STOLEN: 476 return xe_ttm_stolen_io_mem_reserve(xe, mem); 477 default: 478 return -EINVAL; 479 } 480 } 481 482 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo, 483 const struct ttm_operation_ctx *ctx) 484 { 485 struct dma_resv_iter cursor; 486 struct dma_fence *fence; 487 struct drm_gem_object *obj = &bo->ttm.base; 488 struct drm_gpuvm_bo *vm_bo; 489 bool idle = false; 490 int ret = 0; 491 492 dma_resv_assert_held(bo->ttm.base.resv); 493 494 if (!list_empty(&bo->ttm.base.gpuva.list)) { 495 dma_resv_iter_begin(&cursor, bo->ttm.base.resv, 496 DMA_RESV_USAGE_BOOKKEEP); 497 dma_resv_for_each_fence_unlocked(&cursor, fence) 498 dma_fence_enable_sw_signaling(fence); 499 dma_resv_iter_end(&cursor); 500 } 501 502 drm_gem_for_each_gpuvm_bo(vm_bo, obj) { 503 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm); 504 struct drm_gpuva *gpuva; 505 506 if (!xe_vm_in_fault_mode(vm)) { 507 drm_gpuvm_bo_evict(vm_bo, true); 508 continue; 509 } 510 511 if (!idle) { 512 long timeout; 513 514 if (ctx->no_wait_gpu && 515 !dma_resv_test_signaled(bo->ttm.base.resv, 516 DMA_RESV_USAGE_BOOKKEEP)) 517 return -EBUSY; 518 519 timeout = dma_resv_wait_timeout(bo->ttm.base.resv, 520 DMA_RESV_USAGE_BOOKKEEP, 521 ctx->interruptible, 522 MAX_SCHEDULE_TIMEOUT); 523 if (!timeout) 524 return -ETIME; 525 if (timeout < 0) 526 return timeout; 527 528 idle = true; 529 } 530 531 drm_gpuvm_bo_for_each_va(gpuva, vm_bo) { 532 struct xe_vma *vma = gpuva_to_vma(gpuva); 533 534 trace_xe_vma_evict(vma); 535 ret = xe_vm_invalidate_vma(vma); 536 if (XE_WARN_ON(ret)) 537 return ret; 538 } 539 } 540 541 return ret; 542 } 543 544 /* 
545 * The dma-buf map_attachment() / unmap_attachment() is hooked up here. 546 * Note that unmapping the attachment is deferred to the next 547 * map_attachment time, or to bo destroy (after idling) whichever comes first. 548 * This is to avoid syncing before unmap_attachment(), assuming that the 549 * caller relies on idling the reservation object before moving the 550 * backing store out. Should that assumption not hold, then we will be able 551 * to unconditionally call unmap_attachment() when moving out to system. 552 */ 553 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo, 554 struct ttm_resource *new_res) 555 { 556 struct dma_buf_attachment *attach = ttm_bo->base.import_attach; 557 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt, 558 ttm); 559 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 560 struct sg_table *sg; 561 562 xe_assert(xe, attach); 563 xe_assert(xe, ttm_bo->ttm); 564 565 if (new_res->mem_type == XE_PL_SYSTEM) 566 goto out; 567 568 if (ttm_bo->sg) { 569 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL); 570 ttm_bo->sg = NULL; 571 } 572 573 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 574 if (IS_ERR(sg)) 575 return PTR_ERR(sg); 576 577 ttm_bo->sg = sg; 578 xe_tt->sg = sg; 579 580 out: 581 ttm_bo_move_null(ttm_bo, new_res); 582 583 return 0; 584 } 585 586 /** 587 * xe_bo_move_notify - Notify subsystems of a pending move 588 * @bo: The buffer object 589 * @ctx: The struct ttm_operation_ctx controlling locking and waits. 590 * 591 * This function notifies subsystems of an upcoming buffer move. 592 * Upon receiving such a notification, subsystems should schedule 593 * halting access to the underlying pages and optionally add a fence 594 * to the buffer object's dma_resv object, that signals when access is 595 * stopped. The caller will wait on all dma_resv fences before 596 * starting the move. 597 * 598 * A subsystem may commence access to the object after obtaining 599 * bindings to the new backing memory under the object lock. 600 * 601 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode, 602 * negative error code on error. 603 */ 604 static int xe_bo_move_notify(struct xe_bo *bo, 605 const struct ttm_operation_ctx *ctx) 606 { 607 struct ttm_buffer_object *ttm_bo = &bo->ttm; 608 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 609 struct ttm_resource *old_mem = ttm_bo->resource; 610 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; 611 int ret; 612 613 /* 614 * If this starts to call into many components, consider 615 * using a notification chain here. 616 */ 617 618 if (xe_bo_is_pinned(bo)) 619 return -EINVAL; 620 621 xe_bo_vunmap(bo); 622 ret = xe_bo_trigger_rebind(xe, bo, ctx); 623 if (ret) 624 return ret; 625 626 /* Don't call move_notify() for imported dma-bufs. */ 627 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach) 628 dma_buf_move_notify(ttm_bo->base.dma_buf); 629 630 /* 631 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual), 632 * so if we moved from VRAM make sure to unlink this from the userfault 633 * tracking. 
634 */ 635 if (mem_type_is_vram(old_mem_type)) { 636 mutex_lock(&xe->mem_access.vram_userfault.lock); 637 if (!list_empty(&bo->vram_userfault_link)) 638 list_del_init(&bo->vram_userfault_link); 639 mutex_unlock(&xe->mem_access.vram_userfault.lock); 640 } 641 642 return 0; 643 } 644 645 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, 646 struct ttm_operation_ctx *ctx, 647 struct ttm_resource *new_mem, 648 struct ttm_place *hop) 649 { 650 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 651 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 652 struct ttm_resource *old_mem = ttm_bo->resource; 653 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM; 654 struct ttm_tt *ttm = ttm_bo->ttm; 655 struct xe_migrate *migrate = NULL; 656 struct dma_fence *fence; 657 bool move_lacks_source; 658 bool tt_has_data; 659 bool needs_clear; 660 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) && 661 ttm && ttm_tt_is_populated(ttm)) ? true : false; 662 int ret = 0; 663 664 /* Bo creation path, moving to system or TT. */ 665 if ((!old_mem && ttm) && !handle_system_ccs) { 666 if (new_mem->mem_type == XE_PL_TT) 667 ret = xe_tt_map_sg(ttm); 668 if (!ret) 669 ttm_bo_move_null(ttm_bo, new_mem); 670 goto out; 671 } 672 673 if (ttm_bo->type == ttm_bo_type_sg) { 674 ret = xe_bo_move_notify(bo, ctx); 675 if (!ret) 676 ret = xe_bo_move_dmabuf(ttm_bo, new_mem); 677 return ret; 678 } 679 680 tt_has_data = ttm && (ttm_tt_is_populated(ttm) || 681 (ttm->page_flags & TTM_TT_FLAG_SWAPPED)); 682 683 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) : 684 (!mem_type_is_vram(old_mem_type) && !tt_has_data)); 685 686 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) || 687 (!ttm && ttm_bo->type == ttm_bo_type_device); 688 689 if (new_mem->mem_type == XE_PL_TT) { 690 ret = xe_tt_map_sg(ttm); 691 if (ret) 692 goto out; 693 } 694 695 if ((move_lacks_source && !needs_clear)) { 696 ttm_bo_move_null(ttm_bo, new_mem); 697 goto out; 698 } 699 700 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) { 701 ttm_bo_move_null(ttm_bo, new_mem); 702 goto out; 703 } 704 705 /* 706 * Failed multi-hop where the old_mem is still marked as 707 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move. 
	 */
	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_TT) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
		ret = xe_bo_move_notify(bo, ctx);
		if (ret)
			goto out;
	}

	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
						     true,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
			goto out;
		}

		if (!handle_system_ccs) {
			ttm_bo_move_null(ttm_bo, new_mem);
			goto out;
		}
	}

	if (!move_lacks_source &&
	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
	     (mem_type_is_vram(old_mem_type) &&
	      new_mem->mem_type == XE_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = XE_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		ret = -EMULTIHOP;
		goto out;
	}

	if (bo->tile)
		migrate = bo->tile->migrate;
	else if (resource_is_vram(new_mem))
		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
	else if (mem_type_is_vram(old_mem_type))
		migrate = mem_type_to_migrate(xe, old_mem_type);
	else
		migrate = xe->tiles[0].migrate;

	xe_assert(xe, migrate);
	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
	if (xe_rpm_reclaim_safe(xe)) {
		/*
		 * We might be called through swapout in the validation path of
		 * another TTM device, so unconditionally acquire rpm here.
		 */
		xe_pm_runtime_get(xe);
	} else {
		drm_WARN_ON(&xe->drm, handle_system_ccs);
		xe_pm_runtime_get_noresume(xe);
	}

	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
		/*
		 * Kernel memory that is pinned should only be moved on suspend
		 * / resume; some of the pinned memory is required for the
		 * device to resume / use the GPU to move other evicted memory
		 * (user memory) around. This could likely be optimized a bit
		 * further by finding the minimum set of pinned memory required
		 * for resume, but for simplicity we do a memcpy for all
		 * pinned memory.
		 */
		ret = xe_bo_vmap(bo);
		if (!ret) {
			ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);

			/* Create a new VMAP once kernel BO back in VRAM */
			if (!ret && resource_is_vram(new_mem)) {
				struct xe_mem_region *vram = res_to_mem_region(new_mem);
				void __iomem *new_addr = vram->mapping +
					(new_mem->start << PAGE_SHIFT);

				if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
					ret = -EINVAL;
					xe_pm_runtime_put(xe);
					goto out;
				}

				xe_assert(xe, new_mem->start ==
					  bo->placements->fpfn);

				iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
			}
		}
	} else {
		if (move_lacks_source) {
			u32 flags = 0;

			if (mem_type_is_vram(new_mem->mem_type))
				flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
			else if (handle_system_ccs)
				flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;

			fence = xe_migrate_clear(migrate, bo, new_mem, flags);
		} else
			fence = xe_migrate_copy(migrate, bo, bo, old_mem,
						new_mem, handle_system_ccs);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			xe_pm_runtime_put(xe);
			goto out;
		}
		if (!move_lacks_source) {
			ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
							true, new_mem);
			if (ret) {
				dma_fence_wait(fence, false);
				ttm_bo_move_null(ttm_bo, new_mem);
				ret = 0;
			}
		} else {
			/*
			 * ttm_bo_move_accel_cleanup() may blow up if
			 * bo->resource == NULL, so just attach the
			 * fence and set the new resource.
			 */
			dma_resv_add_fence(ttm_bo->base.resv, fence,
					   DMA_RESV_USAGE_KERNEL);
			ttm_bo_move_null(ttm_bo, new_mem);
		}

		dma_fence_put(fence);
	}

	xe_pm_runtime_put(xe);

out:
	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
	    ttm_bo->ttm)
		xe_tt_unmap_sg(ttm_bo->ttm);

	return ret;
}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{
	struct ttm_place place = {
		.mem_type = XE_PL_TT,
	};
	struct ttm_placement placement = {
		.placement = &place,
		.num_placement = 1,
	};
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_vram(bo)))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
	if (ret)
		return ret;

	if (!bo->ttm.ttm) {
		bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
		if (!bo->ttm.ttm) {
			ret = -ENOMEM;
			goto err_res_free;
		}
	}

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
	};
	struct ttm_resource *new_mem;
	int ret;

	xe_bo_assert_held(bo);

	if (WARN_ON(!bo->ttm.resource))
		return -EINVAL;

	if (WARN_ON(!xe_bo_is_pinned(bo)))
		return -EINVAL;

	if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
		return -EINVAL;

	ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
	if (ret)
		return ret;

	ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
	if (ret)
		goto err_res_free;

	ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
	if (ret)
		goto err_res_free;

	ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
	if (ret)
		goto err_res_free;

	return 0;

err_res_free:
	ttm_resource_free(&bo->ttm, &new_mem);
	return ret;
}

static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_res_cursor cursor;
	struct xe_mem_region *vram;

	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;

	vram = res_to_mem_region(ttm_bo->resource);
	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
}

static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool locked;

	xe_assert(xe, !kref_read(&ttm_bo->kref));

	/*
	 * We can typically only race with TTM trylocking under the
	 * lru_lock, which will immediately be unlocked again since
	 * the ttm_bo refcount is zero at this point. So trylocking *should*
	 * always succeed here, as long as we hold the lru lock.
	 */
	spin_lock(&ttm_bo->bdev->lru_lock);
	locked = dma_resv_trylock(ttm_bo->base.resv);
	spin_unlock(&ttm_bo->bdev->lru_lock);
	xe_assert(xe, locked);

	return locked;
}

static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	bo = ttm_to_xe_bo(ttm_bo);
	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));

	/*
	 * Corner case where TTM fails to allocate memory and this BO's resv
	 * still points to the VM's resv.
	 */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	/*
	 * Scrub the preempt fences if any. The unbind fence is already
	 * attached to the resv.
	 * TODO: Don't do this for external bos once we scrub them after
	 * unbind.
1046 */ 1047 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv, 1048 DMA_RESV_USAGE_BOOKKEEP, fence) { 1049 if (xe_fence_is_xe_preempt(fence) && 1050 !dma_fence_is_signaled(fence)) { 1051 if (!replacement) 1052 replacement = dma_fence_get_stub(); 1053 1054 dma_resv_replace_fences(ttm_bo->base.resv, 1055 fence->context, 1056 replacement, 1057 DMA_RESV_USAGE_BOOKKEEP); 1058 } 1059 } 1060 dma_fence_put(replacement); 1061 1062 dma_resv_unlock(ttm_bo->base.resv); 1063 } 1064 1065 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo) 1066 { 1067 if (!xe_bo_is_xe_bo(ttm_bo)) 1068 return; 1069 1070 /* 1071 * Object is idle and about to be destroyed. Release the 1072 * dma-buf attachment. 1073 */ 1074 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) { 1075 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, 1076 struct xe_ttm_tt, ttm); 1077 1078 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg, 1079 DMA_BIDIRECTIONAL); 1080 ttm_bo->sg = NULL; 1081 xe_tt->sg = NULL; 1082 } 1083 } 1084 1085 const struct ttm_device_funcs xe_ttm_funcs = { 1086 .ttm_tt_create = xe_ttm_tt_create, 1087 .ttm_tt_populate = xe_ttm_tt_populate, 1088 .ttm_tt_unpopulate = xe_ttm_tt_unpopulate, 1089 .ttm_tt_destroy = xe_ttm_tt_destroy, 1090 .evict_flags = xe_evict_flags, 1091 .move = xe_bo_move, 1092 .io_mem_reserve = xe_ttm_io_mem_reserve, 1093 .io_mem_pfn = xe_ttm_io_mem_pfn, 1094 .release_notify = xe_ttm_bo_release_notify, 1095 .eviction_valuable = ttm_bo_eviction_valuable, 1096 .delete_mem_notify = xe_ttm_bo_delete_mem_notify, 1097 }; 1098 1099 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) 1100 { 1101 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); 1102 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); 1103 1104 if (bo->ttm.base.import_attach) 1105 drm_prime_gem_destroy(&bo->ttm.base, NULL); 1106 drm_gem_object_release(&bo->ttm.base); 1107 1108 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); 1109 1110 if (bo->ggtt_node && bo->ggtt_node->base.size) 1111 xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); 1112 1113 #ifdef CONFIG_PROC_FS 1114 if (bo->client) 1115 xe_drm_client_remove_bo(bo); 1116 #endif 1117 1118 if (bo->vm && xe_bo_is_user(bo)) 1119 xe_vm_put(bo->vm); 1120 1121 mutex_lock(&xe->mem_access.vram_userfault.lock); 1122 if (!list_empty(&bo->vram_userfault_link)) 1123 list_del(&bo->vram_userfault_link); 1124 mutex_unlock(&xe->mem_access.vram_userfault.lock); 1125 1126 kfree(bo); 1127 } 1128 1129 static void xe_gem_object_free(struct drm_gem_object *obj) 1130 { 1131 /* Our BO reference counting scheme works as follows: 1132 * 1133 * The gem object kref is typically used throughout the driver, 1134 * and the gem object holds a ttm_buffer_object refcount, so 1135 * that when the last gem object reference is put, which is when 1136 * we end up in this function, we put also that ttm_buffer_object 1137 * refcount. Anything using gem interfaces is then no longer 1138 * allowed to access the object in a way that requires a gem 1139 * refcount, including locking the object. 1140 * 1141 * driver ttm callbacks is allowed to use the ttm_buffer_object 1142 * refcount directly if needed. 
	 */
	__xe_bo_vunmap(gem_to_xe_bo(obj));
	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
}

static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));

		xe_bo_lock(bo, false);
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
		xe_bo_unlock(bo);
	}
}

static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
	struct drm_device *ddev = tbo->base.dev;
	struct xe_device *xe = to_xe_device(ddev);
	struct xe_bo *bo = ttm_to_xe_bo(tbo);
	bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
	vm_fault_t ret;
	int idx;

	if (needs_rpm)
		xe_pm_runtime_get(xe);

	ret = ttm_bo_vm_reserve(tbo, vmf);
	if (ret)
		goto out;

	if (drm_dev_enter(ddev, &idx)) {
		trace_xe_bo_cpu_fault(bo);

		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto out;
	/*
	 * ttm_bo_vm_reserve() already has dma_resv_lock.
	 */
	if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		if (list_empty(&bo->vram_userfault_link))
			list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

	dma_resv_unlock(tbo->base.resv);
out:
	if (needs_rpm)
		xe_pm_runtime_put(xe);

	return ret;
}

static const struct vm_operations_struct xe_gem_vm_ops = {
	.fault = xe_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

static const struct drm_gem_object_funcs xe_gem_object_funcs = {
	.free = xe_gem_object_free,
	.close = xe_gem_object_close,
	.mmap = drm_gem_ttm_mmap,
	.export = xe_gem_prime_export,
	.vm_ops = &xe_gem_vm_ops,
};

/**
 * xe_bo_alloc - Allocate storage for a struct xe_bo
 *
 * This function is intended to allocate storage to be used for input
 * to __xe_bo_create_locked(), in case a pointer to the bo to be
 * created is needed before the call to __xe_bo_create_locked().
 * If __xe_bo_create_locked() ends up never being called, then the
 * storage allocated with this function needs to be freed using
 * xe_bo_free().
 *
 * Return: A pointer to an uninitialized struct xe_bo on success,
 * ERR_PTR(-ENOMEM) on error.
 */
struct xe_bo *xe_bo_alloc(void)
{
	struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return ERR_PTR(-ENOMEM);

	return bo;
}

/**
 * xe_bo_free - Free storage allocated using xe_bo_alloc()
 * @bo: The buffer object storage.
 *
 * Refer to xe_bo_alloc() documentation for valid use-cases.
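 *
 * A minimal usage sketch (illustrative only, not taken from an existing
 * caller; error handling trimmed):
 *
 *	bo = xe_bo_alloc();
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	...
 *	either pass bo on to ___xe_bo_create_locked(), or, if that call
 *	never happens, release the storage again with:
 *	xe_bo_free(bo);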
1252 */ 1253 void xe_bo_free(struct xe_bo *bo) 1254 { 1255 kfree(bo); 1256 } 1257 1258 struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, 1259 struct xe_tile *tile, struct dma_resv *resv, 1260 struct ttm_lru_bulk_move *bulk, size_t size, 1261 u16 cpu_caching, enum ttm_bo_type type, 1262 u32 flags) 1263 { 1264 struct ttm_operation_ctx ctx = { 1265 .interruptible = true, 1266 .no_wait_gpu = false, 1267 }; 1268 struct ttm_placement *placement; 1269 uint32_t alignment; 1270 size_t aligned_size; 1271 int err; 1272 1273 /* Only kernel objects should set GT */ 1274 xe_assert(xe, !tile || type == ttm_bo_type_kernel); 1275 1276 if (XE_WARN_ON(!size)) { 1277 xe_bo_free(bo); 1278 return ERR_PTR(-EINVAL); 1279 } 1280 1281 if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && 1282 !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && 1283 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || 1284 (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) { 1285 size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K; 1286 1287 aligned_size = ALIGN(size, align); 1288 if (type != ttm_bo_type_device) 1289 size = ALIGN(size, align); 1290 flags |= XE_BO_FLAG_INTERNAL_64K; 1291 alignment = align >> PAGE_SHIFT; 1292 } else { 1293 aligned_size = ALIGN(size, SZ_4K); 1294 flags &= ~XE_BO_FLAG_INTERNAL_64K; 1295 alignment = SZ_4K >> PAGE_SHIFT; 1296 } 1297 1298 if (type == ttm_bo_type_device && aligned_size != size) 1299 return ERR_PTR(-EINVAL); 1300 1301 if (!bo) { 1302 bo = xe_bo_alloc(); 1303 if (IS_ERR(bo)) 1304 return bo; 1305 } 1306 1307 bo->ccs_cleared = false; 1308 bo->tile = tile; 1309 bo->size = size; 1310 bo->flags = flags; 1311 bo->cpu_caching = cpu_caching; 1312 bo->ttm.base.funcs = &xe_gem_object_funcs; 1313 bo->ttm.priority = XE_BO_PRIORITY_NORMAL; 1314 INIT_LIST_HEAD(&bo->pinned_link); 1315 #ifdef CONFIG_PROC_FS 1316 INIT_LIST_HEAD(&bo->client_link); 1317 #endif 1318 INIT_LIST_HEAD(&bo->vram_userfault_link); 1319 1320 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size); 1321 1322 if (resv) { 1323 ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT); 1324 ctx.resv = resv; 1325 } 1326 1327 if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) { 1328 err = __xe_bo_placement_for_flags(xe, bo, bo->flags); 1329 if (WARN_ON(err)) { 1330 xe_ttm_bo_destroy(&bo->ttm); 1331 return ERR_PTR(err); 1332 } 1333 } 1334 1335 /* Defer populating type_sg bos */ 1336 placement = (type == ttm_bo_type_sg || 1337 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement : 1338 &bo->placement; 1339 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type, 1340 placement, alignment, 1341 &ctx, NULL, resv, xe_ttm_bo_destroy); 1342 if (err) 1343 return ERR_PTR(err); 1344 1345 /* 1346 * The VRAM pages underneath are potentially still being accessed by the 1347 * GPU, as per async GPU clearing and async evictions. However TTM makes 1348 * sure to add any corresponding move/clear fences into the objects 1349 * dma-resv using the DMA_RESV_USAGE_KERNEL slot. 1350 * 1351 * For KMD internal buffers we don't care about GPU clearing, however we 1352 * still need to handle async evictions, where the VRAM is still being 1353 * accessed by the GPU. Most internal callers are not expecting this, 1354 * since they are missing the required synchronisation before accessing 1355 * the memory. To keep things simple just sync wait any kernel fences 1356 * here, if the buffer is designated KMD internal. 
	 *
	 * For normal userspace objects we should already have the required
	 * pipelining or sync waiting elsewhere, since we already have to deal
	 * with things like async GPU clearing.
	 */
	if (type == ttm_bo_type_kernel) {
		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     ctx.interruptible,
						     MAX_SCHEDULE_TIMEOUT);

		if (timeout < 0) {
			if (!resv)
				dma_resv_unlock(bo->ttm.base.resv);
			xe_bo_put(bo);
			return ERR_PTR(timeout);
		}
	}

	bo->created = true;
	if (bulk)
		ttm_bo_set_bulk_move(&bo->ttm, bulk);
	else
		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return bo;
}

static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{
	struct ttm_place *place = bo->placements;

	if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
		return -EINVAL;

	place->flags = TTM_PL_FLAG_CONTIGUOUS;
	place->fpfn = start >> PAGE_SHIFT;
	place->lpfn = end >> PAGE_SHIFT;

	switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
	case XE_BO_FLAG_VRAM0:
		place->mem_type = XE_PL_VRAM0;
		break;
	case XE_BO_FLAG_VRAM1:
		place->mem_type = XE_PL_VRAM1;
		break;
	case XE_BO_FLAG_STOLEN:
		place->mem_type = XE_PL_STOLEN;
		break;

	default:
		/* 0 or multiple of the above set */
		return -EINVAL;
	}

	bo->placement = (struct ttm_placement) {
		.num_placement = 1,
		.placement = place,
	};

	return 0;
}

static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
		      struct xe_tile *tile, struct xe_vm *vm,
		      size_t size, u64 start, u64 end,
		      u16 cpu_caching, enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = NULL;
	int err;

	if (vm)
		xe_vm_assert_held(vm);

	if (start || end != ~0ULL) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;

		flags |= XE_BO_FLAG_FIXED_PLACEMENT;
		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
		if (err) {
			xe_bo_free(bo);
			return ERR_PTR(err);
		}
	}

	bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
				    vm && !xe_vm_in_fault_mode(vm) &&
				    flags & XE_BO_FLAG_USER ?
				    &vm->lru_bulk_move : NULL, size,
				    cpu_caching, type, flags);
	if (IS_ERR(bo))
		return bo;

	/*
	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
	 * to ensure the shared resv doesn't disappear under the bo, the bo
	 * will keep a reference to the vm, and avoid circular references
	 * by having all the vm's bo references released at vm close
	 * time.
1462 */ 1463 if (vm && xe_bo_is_user(bo)) 1464 xe_vm_get(vm); 1465 bo->vm = vm; 1466 1467 if (bo->flags & XE_BO_FLAG_GGTT) { 1468 if (!tile && flags & XE_BO_FLAG_STOLEN) 1469 tile = xe_device_get_root_tile(xe); 1470 1471 xe_assert(xe, tile); 1472 1473 if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { 1474 err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, 1475 start + bo->size, U64_MAX); 1476 } else { 1477 err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); 1478 } 1479 if (err) 1480 goto err_unlock_put_bo; 1481 } 1482 1483 return bo; 1484 1485 err_unlock_put_bo: 1486 __xe_bo_unset_bulk_move(bo); 1487 xe_bo_unlock_vm_held(bo); 1488 xe_bo_put(bo); 1489 return ERR_PTR(err); 1490 } 1491 1492 struct xe_bo * 1493 xe_bo_create_locked_range(struct xe_device *xe, 1494 struct xe_tile *tile, struct xe_vm *vm, 1495 size_t size, u64 start, u64 end, 1496 enum ttm_bo_type type, u32 flags) 1497 { 1498 return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); 1499 } 1500 1501 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, 1502 struct xe_vm *vm, size_t size, 1503 enum ttm_bo_type type, u32 flags) 1504 { 1505 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); 1506 } 1507 1508 struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, 1509 struct xe_vm *vm, size_t size, 1510 u16 cpu_caching, 1511 u32 flags) 1512 { 1513 struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 1514 cpu_caching, ttm_bo_type_device, 1515 flags | XE_BO_FLAG_USER); 1516 if (!IS_ERR(bo)) 1517 xe_bo_unlock_vm_held(bo); 1518 1519 return bo; 1520 } 1521 1522 struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile, 1523 struct xe_vm *vm, size_t size, 1524 enum ttm_bo_type type, u32 flags) 1525 { 1526 struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags); 1527 1528 if (!IS_ERR(bo)) 1529 xe_bo_unlock_vm_held(bo); 1530 1531 return bo; 1532 } 1533 1534 struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, 1535 struct xe_vm *vm, 1536 size_t size, u64 offset, 1537 enum ttm_bo_type type, u32 flags) 1538 { 1539 struct xe_bo *bo; 1540 int err; 1541 u64 start = offset == ~0ull ? 0 : offset; 1542 u64 end = offset == ~0ull ? 
offset : start + size; 1543 1544 if (flags & XE_BO_FLAG_STOLEN && 1545 xe_ttm_stolen_cpu_access_needs_ggtt(xe)) 1546 flags |= XE_BO_FLAG_GGTT; 1547 1548 bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, 1549 flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); 1550 if (IS_ERR(bo)) 1551 return bo; 1552 1553 err = xe_bo_pin(bo); 1554 if (err) 1555 goto err_put; 1556 1557 err = xe_bo_vmap(bo); 1558 if (err) 1559 goto err_unpin; 1560 1561 xe_bo_unlock_vm_held(bo); 1562 1563 return bo; 1564 1565 err_unpin: 1566 xe_bo_unpin(bo); 1567 err_put: 1568 xe_bo_unlock_vm_held(bo); 1569 xe_bo_put(bo); 1570 return ERR_PTR(err); 1571 } 1572 1573 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, 1574 struct xe_vm *vm, size_t size, 1575 enum ttm_bo_type type, u32 flags) 1576 { 1577 return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); 1578 } 1579 1580 struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, 1581 const void *data, size_t size, 1582 enum ttm_bo_type type, u32 flags) 1583 { 1584 struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, 1585 ALIGN(size, PAGE_SIZE), 1586 type, flags); 1587 if (IS_ERR(bo)) 1588 return bo; 1589 1590 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); 1591 1592 return bo; 1593 } 1594 1595 static void __xe_bo_unpin_map_no_vm(void *arg) 1596 { 1597 xe_bo_unpin_map_no_vm(arg); 1598 } 1599 1600 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, 1601 size_t size, u32 flags) 1602 { 1603 struct xe_bo *bo; 1604 int ret; 1605 1606 bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags); 1607 if (IS_ERR(bo)) 1608 return bo; 1609 1610 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo); 1611 if (ret) 1612 return ERR_PTR(ret); 1613 1614 return bo; 1615 } 1616 1617 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, 1618 const void *data, size_t size, u32 flags) 1619 { 1620 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags); 1621 1622 if (IS_ERR(bo)) 1623 return bo; 1624 1625 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); 1626 1627 return bo; 1628 } 1629 1630 /** 1631 * xe_managed_bo_reinit_in_vram 1632 * @xe: xe device 1633 * @tile: Tile where the new buffer will be created 1634 * @src: Managed buffer object allocated in system memory 1635 * 1636 * Replace a managed src buffer object allocated in system memory with a new 1637 * one allocated in vram, copying the data between them. 1638 * Buffer object in VRAM is not going to have the same GGTT address, the caller 1639 * is responsible for making sure that any old references to it are updated. 1640 * 1641 * Returns 0 for success, negative error code otherwise. 
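 *
 * An illustrative sketch of the intended use (hypothetical caller, not
 * taken from this file):
 *
 *	bo = xe_managed_bo_create_from_data(xe, tile, data, size, flags);
 *	...
 *	err = xe_managed_bo_reinit_in_vram(xe, tile, &bo);
 *	if (err)
 *		return err;
 *	after this, bo points at the VRAM copy and any previously looked-up
 *	GGTT address must be refreshed by the caller.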
 */
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
	struct xe_bo *bo;
	u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;

	dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, !(*src)->vmap.is_iomem);

	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
					    (*src)->size, dst_flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
	*src = bo;

	return 0;
}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);

	if (res->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_gpu_offset(xe);

	return res_to_mem_region(res)->dpa_base;
}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
		err = xe_bo_validate(bo, NULL, false);
		if (err)
			return err;

		if (xe_bo_is_vram(bo)) {
			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link,
				      &xe->pinned.external_vram);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

int xe_bo_pin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	/* We currently don't expect user BOs to be pinned */
	xe_assert(xe, !xe_bo_is_user(bo));

	/* Pinned object must be in GGTT or have pinned flag */
	xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
				   XE_BO_FLAG_GGTT));

	/*
	 * No reason we can't support pinning imported dma-bufs, we just don't
	 * expect to pin an imported dma-buf.
	 */
	xe_assert(xe, !bo->ttm.base.import_attach);

	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	/*
	 * For pinned objects on DGFX, which are also in VRAM, we expect
	 * these to be in contiguous VRAM memory. This is required for
	 * eviction / restore during suspend / resume (force restore to the
	 * same physical address).
	 */
	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
			     bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);

			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);

			spin_lock(&xe->pinned.lock);
			list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_pin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_pinned(bo));
	xe_assert(xe, xe_bo_is_user(bo));

	spin_lock(&xe->pinned.lock);
	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link))
		list_del_init(&bo->pinned_link);
	spin_unlock(&xe->pinned.lock);

	ttm_bo_unpin(&bo->ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
}

void xe_bo_unpin(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->ttm.base.import_attach);
	xe_assert(xe, xe_bo_is_pinned(bo));

	if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
			     bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
		struct ttm_place *place = &(bo->placements[0]);

		if (mem_type_is_vram(place->mem_type)) {
			spin_lock(&xe->pinned.lock);
			xe_assert(xe, !list_empty(&bo->pinned_link));
			list_del_init(&bo->pinned_link);
			spin_unlock(&xe->pinned.lock);
		}
	}

	ttm_bo_unpin(&bo->ttm);
}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 * NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 * reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction share
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
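 *
 * A typical call sequence looks roughly like the following (an
 * illustrative sketch only; real callers also need to handle
 * -EINTR / -ERESTARTSYS by restarting):
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *	err = xe_bo_validate(bo, NULL, false);
 *	xe_bo_unlock(bo);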
1847 */ 1848 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict) 1849 { 1850 struct ttm_operation_ctx ctx = { 1851 .interruptible = true, 1852 .no_wait_gpu = false, 1853 }; 1854 1855 if (vm) { 1856 lockdep_assert_held(&vm->lock); 1857 xe_vm_assert_held(vm); 1858 1859 ctx.allow_res_evict = allow_res_evict; 1860 ctx.resv = xe_vm_resv(vm); 1861 } 1862 1863 return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); 1864 } 1865 1866 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo) 1867 { 1868 if (bo->destroy == &xe_ttm_bo_destroy) 1869 return true; 1870 1871 return false; 1872 } 1873 1874 /* 1875 * Resolve a BO address. There is no assert to check if the proper lock is held 1876 * so it should only be used in cases where it is not fatal to get the wrong 1877 * address, such as printing debug information, but not in cases where memory is 1878 * written based on this result. 1879 */ 1880 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) 1881 { 1882 struct xe_device *xe = xe_bo_device(bo); 1883 struct xe_res_cursor cur; 1884 u64 page; 1885 1886 xe_assert(xe, page_size <= PAGE_SIZE); 1887 page = offset >> PAGE_SHIFT; 1888 offset &= (PAGE_SIZE - 1); 1889 1890 if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { 1891 xe_assert(xe, bo->ttm.ttm); 1892 1893 xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT, 1894 page_size, &cur); 1895 return xe_res_dma(&cur) + offset; 1896 } else { 1897 struct xe_res_cursor cur; 1898 1899 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT, 1900 page_size, &cur); 1901 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource); 1902 } 1903 } 1904 1905 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size) 1906 { 1907 if (!READ_ONCE(bo->ttm.pin_count)) 1908 xe_bo_assert_held(bo); 1909 return __xe_bo_addr(bo, offset, page_size); 1910 } 1911 1912 int xe_bo_vmap(struct xe_bo *bo) 1913 { 1914 void *virtual; 1915 bool is_iomem; 1916 int ret; 1917 1918 xe_bo_assert_held(bo); 1919 1920 if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) 1921 return -EINVAL; 1922 1923 if (!iosys_map_is_null(&bo->vmap)) 1924 return 0; 1925 1926 /* 1927 * We use this more or less deprecated interface for now since 1928 * ttm_bo_vmap() doesn't offer the optimization of kmapping 1929 * single page bos, which is done here. 1930 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap 1931 * to use struct iosys_map. 
1932 */ 1933 ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); 1934 if (ret) 1935 return ret; 1936 1937 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 1938 if (is_iomem) 1939 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual); 1940 else 1941 iosys_map_set_vaddr(&bo->vmap, virtual); 1942 1943 return 0; 1944 } 1945 1946 static void __xe_bo_vunmap(struct xe_bo *bo) 1947 { 1948 if (!iosys_map_is_null(&bo->vmap)) { 1949 iosys_map_clear(&bo->vmap); 1950 ttm_bo_kunmap(&bo->kmap); 1951 } 1952 } 1953 1954 void xe_bo_vunmap(struct xe_bo *bo) 1955 { 1956 xe_bo_assert_held(bo); 1957 __xe_bo_vunmap(bo); 1958 } 1959 1960 int xe_gem_create_ioctl(struct drm_device *dev, void *data, 1961 struct drm_file *file) 1962 { 1963 struct xe_device *xe = to_xe_device(dev); 1964 struct xe_file *xef = to_xe_file(file); 1965 struct drm_xe_gem_create *args = data; 1966 struct xe_vm *vm = NULL; 1967 struct xe_bo *bo; 1968 unsigned int bo_flags; 1969 u32 handle; 1970 int err; 1971 1972 if (XE_IOCTL_DBG(xe, args->extensions) || 1973 XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) || 1974 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 1975 return -EINVAL; 1976 1977 /* at least one valid memory placement must be specified */ 1978 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) || 1979 !args->placement)) 1980 return -EINVAL; 1981 1982 if (XE_IOCTL_DBG(xe, args->flags & 1983 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING | 1984 DRM_XE_GEM_CREATE_FLAG_SCANOUT | 1985 DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM))) 1986 return -EINVAL; 1987 1988 if (XE_IOCTL_DBG(xe, args->handle)) 1989 return -EINVAL; 1990 1991 if (XE_IOCTL_DBG(xe, !args->size)) 1992 return -EINVAL; 1993 1994 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX)) 1995 return -EINVAL; 1996 1997 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK)) 1998 return -EINVAL; 1999 2000 bo_flags = 0; 2001 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING) 2002 bo_flags |= XE_BO_FLAG_DEFER_BACKING; 2003 2004 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT) 2005 bo_flags |= XE_BO_FLAG_SCANOUT; 2006 2007 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1); 2008 2009 /* CCS formats need physical placement at a 64K alignment in VRAM. 
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
	struct xe_vm *vm = NULL;
	struct xe_bo *bo;
	unsigned int bo_flags;
	u32 handle;
	int err;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	/* at least one valid memory placement must be specified */
	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
			 !args->placement))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->handle))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !args->size))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
		return -EINVAL;

	bo_flags = 0;
	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
		bo_flags |= XE_BO_FLAG_DEFER_BACKING;

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
		bo_flags |= XE_BO_FLAG_SCANOUT;

	bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);

	/* CCS formats need physical placement at a 64K alignment in VRAM. */
	if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
	    (bo_flags & XE_BO_FLAG_SCANOUT) &&
	    !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
	    IS_ALIGNED(args->size, SZ_64K))
		bo_flags |= XE_BO_FLAG_NEEDS_64K;

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
			return -EINVAL;

		bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
	}

	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
		return -EINVAL;

	if (args->vm_id) {
		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;
		err = xe_vm_lock(vm, true);
		if (err)
			goto out_vm;
	}

	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
			       bo_flags);

	if (vm)
		xe_vm_unlock(vm);

	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out_vm;
	}

	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
	if (err)
		goto out_bulk;

	args->handle = handle;
	goto out_put;

out_bulk:
	if (vm && !xe_vm_in_fault_mode(vm)) {
		xe_vm_lock(vm, false);
		__xe_bo_unset_bulk_move(bo);
		xe_vm_unlock(vm);
	}
out_put:
	xe_bo_put(bo);
out_vm:
	if (vm)
		xe_vm_put(vm);

	return err;
}
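/*
 * Illustrative userspace-side sketch (not part of the driver) of the argument
 * rules enforced above: the size must be page aligned, at least one placement
 * bit must be set, VRAM placements require write-combined CPU caching, and
 * NEEDS_VISIBLE_VRAM is only valid together with a VRAM placement. The
 * vram_placement_bit variable is hypothetical.
 *
 *	struct drm_xe_gem_create create = {
 *		.size = 0x10000,
 *		.placement = vram_placement_bit,
 *		.flags = DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM,
 *		.cpu_caching = DRM_XE_GEM_CPU_CACHING_WC,
 *	};
 *
 *	if (!ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
 *		handle = create.handle;
 */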
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_gem_mmap_offset *args = data;
	struct drm_gem_object *gem_obj;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags))
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (XE_IOCTL_DBG(xe, !gem_obj))
		return -ENOENT;

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	xe_bo_put(gem_to_xe_bo(gem_obj));
	return 0;
}

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether to perform any wait interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);

	dma_resv_lock(bo->ttm.base.resv, NULL);

	return 0;
}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{
	dma_resv_unlock(bo->ttm.base.resv);
}

/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{
	unsigned int cur_place;

	if (bo->ttm.type == ttm_bo_type_kernel)
		return true;

	if (bo->ttm.type == ttm_bo_type_sg)
		return false;

	for (cur_place = 0; cur_place < bo->placement.num_placement;
	     cur_place++) {
		if (bo->placements[cur_place].mem_type == mem_type)
			return true;
	}

	return false;
}

static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = mem_type;
}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet, and to
 * accomplish that, the object's kernel fences must be signaled with
 * the object lock held.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement placement;
	struct ttm_place requested;

	xe_bo_assert_held(bo);

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;

	if (xe_bo_is_pinned(bo))
		return -EBUSY;

	if (!xe_bo_can_migrate(bo, mem_type))
		return -EINVAL;

	xe_place_from_ttm_type(mem_type, &requested);
	placement.num_placement = 1;
	placement.placement = &requested;

	/*
	 * Stolen needs to be handled like below VRAM handling if we ever need
	 * to support it.
	 */
	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);

	if (mem_type_is_vram(mem_type)) {
		u32 c = 0;

		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
}
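/*
 * Illustrative sketch (not part of the driver): migrating a BO to the GTT
 * placement and, as the kernel-doc above notes, syncing the kernel fences
 * while the object lock is still held so the async move has completed.
 *
 *	err = xe_bo_lock(bo, true);
 *	if (!err) {
 *		err = xe_bo_migrate(bo, XE_PL_TT);
 *		if (!err)
 *			dma_resv_wait_timeout(bo->ttm.base.resv,
 *					      DMA_RESV_USAGE_KERNEL, false,
 *					      MAX_SCHEDULE_TIMEOUT);
 *		xe_bo_unlock(bo);
 *	}
 */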
/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 * @force_alloc: Set force_alloc in ttm_operation_ctx
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.force_alloc = force_alloc,
	};
	struct ttm_placement placement;
	int ret;

	xe_evict_flags(&bo->ttm, &placement);
	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
	if (ret)
		return ret;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;
}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
		return false;

	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
		return false;

	/* On discrete GPUs, if the GPU can access this buffer from
	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
	 * can't be used since there's no CCS storage associated with
	 * non-VRAM addresses.
	 */
	if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
		return false;

	return true;
}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}

void xe_bo_put(struct xe_bo *bo)
{
	might_sleep();
	if (bo) {
#ifdef CONFIG_PROC_FS
		if (bo->client)
			might_lock(&bo->client->bos_lock);
#endif
		if (bo->ggtt_node && bo->ggtt_node->ggtt)
			might_lock(&bo->ggtt_node->ggtt->lock);
		drm_gem_object_put(&bo->ttm.base);
	}
}
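/*
 * Illustrative sketch (not part of the driver): deferring puts with
 * xe_bo_put_deferred() (declared in xe_bo.h) and releasing them in one go
 * from a context where dropping the final reference is safe.
 *
 *	LLIST_HEAD(deferred);
 *
 *	xe_bo_put_deferred(bo_a, &deferred);
 *	xe_bo_put_deferred(bo_b, &deferred);
 *	...
 *	xe_bo_put_commit(&deferred);
 */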
/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: ...
 * @dev: ...
 * @args: ...
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: ...
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
			      xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ?
			      SZ_64K : SZ_4K);

	args->pitch = ALIGN(args->width * cpp, 64);
	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
			   page_size);

	bo = xe_bo_create_user(xe, NULL, NULL, args->size,
			       DRM_XE_GEM_CPU_CACHING_WC,
			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			       XE_BO_FLAG_SCANOUT |
			       XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}

void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
{
	struct ttm_buffer_object *tbo = &bo->ttm;
	struct ttm_device *bdev = tbo->bdev;

	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);

	list_del_init(&bo->vram_userfault_link);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif